repo_name | path | copies | size | content | license
---|---|---|---|---|---|
kittiu/odoo | addons/mrp_operations/report/mrp_code_barcode.py | 381 | 1511 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class code_barcode(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(code_barcode, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
report_sxw.report_sxw('report.mrp.code.barcode', 'mrp_operations.operation.code', 'addons/mrp_operations/report/mrp_code_barcode.rml',parser=code_barcode,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Philmod/mongo-connector | mongo_connector/doc_managers/solr_doc_manager.py | 8 | 11225 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for the Solr search engine, but the intent
is that this file can be used as an example to add on different backends.
To extend this to other systems, simply implement the exact same class and
replace the method definitions with API calls for the desired backend.
"""
import itertools
import json
import re
from pysolr import Solr, SolrError
from mongo_connector import errors
from mongo_connector.compat import u
from mongo_connector.constants import (DEFAULT_COMMIT_INTERVAL,
DEFAULT_MAX_BULK)
from mongo_connector.util import retry_until_ok
from mongo_connector.doc_managers import DocManagerBase, exception_wrapper
from mongo_connector.doc_managers.formatters import DocumentFlattener
# pysolr only has 1 exception: SolrError
wrap_exceptions = exception_wrapper({
SolrError: errors.OperationFailed})
ADMIN_URL = 'admin/luke?show=schema&wt=json'
decoder = json.JSONDecoder()
class DocManager(DocManagerBase):
"""The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
The reason for storing id/doc pairs as opposed to docs alone is so that
multiple updates to the same doc reflect the most up-to-date version rather
than multiple, slightly different versions of a doc.
"""
def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL,
unique_key='_id', chunk_size=DEFAULT_MAX_BULK, **kwargs):
"""Verify Solr URL and establish a connection.
"""
self.solr = Solr(url)
self.unique_key = unique_key
# pysolr does things in milliseconds
if auto_commit_interval is not None:
self.auto_commit_interval = auto_commit_interval * 1000
else:
self.auto_commit_interval = None
self.chunk_size = chunk_size
self.field_list = []
self._build_fields()
self._formatter = DocumentFlattener()
def _parse_fields(self, result, field_name):
""" If Schema access, parse fields and build respective lists
"""
field_list = []
for key, value in result.get('schema', {}).get(field_name, {}).items():
if key not in field_list:
field_list.append(key)
return field_list
@wrap_exceptions
def _build_fields(self):
""" Builds a list of valid fields
"""
declared_fields = self.solr._send_request('get', ADMIN_URL)
result = decoder.decode(declared_fields)
self.field_list = self._parse_fields(result, 'fields')
# Build regular expressions to match dynamic fields.
# dynamic field names may have exactly one wildcard, either at
# the beginning or the end of the name
self._dynamic_field_regexes = []
for wc_pattern in self._parse_fields(result, 'dynamicFields'):
if wc_pattern[0] == "*":
self._dynamic_field_regexes.append(
re.compile(".*%s\Z" % wc_pattern[1:]))
elif wc_pattern[-1] == "*":
self._dynamic_field_regexes.append(
re.compile("\A%s.*" % wc_pattern[:-1]))
def _clean_doc(self, doc):
"""Reformats the given document before insertion into Solr.
This method reformats the document in the following ways:
- removes extraneous fields that aren't defined in schema.xml
- unwinds arrays in order to find and later flatten sub-documents
- flattens the document so that there are no sub-documents, and every
value is associated with its dot-separated path of keys
An example:
{"a": 2,
"b": {
"c": {
"d": 5
}
},
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
"""
# Translate the _id field to whatever unique key we're using.
# _id may not exist in the doc, if we retrieved it from Solr
# as part of update.
if '_id' in doc:
doc[self.unique_key] = u(doc.pop("_id"))
# SOLR cannot index fields within sub-documents, so flatten documents
# with the dot-separated path to each value as the respective key
flat_doc = self._formatter.format_document(doc)
# Only include fields that are explicitly provided in the
# schema or match one of the dynamic field patterns, if
# we were able to retrieve the schema
if len(self.field_list) + len(self._dynamic_field_regexes) > 0:
def include_field(field):
return field in self.field_list or any(
regex.match(field) for regex in self._dynamic_field_regexes
)
return dict((k, v) for k, v in flat_doc.items() if include_field(k))
return flat_doc
def stop(self):
""" Stops the instance
"""
pass
def apply_update(self, doc, update_spec):
"""Override DocManagerBase.apply_update to have flat documents."""
# Replace a whole document
if '$set' not in update_spec and '$unset' not in update_spec:
# update spec contains the new document
update_spec['_ts'] = doc['_ts']
update_spec['ns'] = doc['ns']
update_spec['_id'] = doc['_id']
return update_spec
for to_set in update_spec.get("$set", []):
value = update_spec['$set'][to_set]
# Find dotted-path to the value, remove that key from doc, then
# put value at key:
keys_to_pop = []
for key in doc:
if key.startswith(to_set):
if key == to_set or key[len(to_set)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
doc[to_set] = value
for to_unset in update_spec.get("$unset", []):
# MongoDB < 2.5.2 reports $unset for fields that don't exist within
# the document being updated.
keys_to_pop = []
for key in doc:
if key.startswith(to_unset):
if key == to_unset or key[len(to_unset)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
return doc
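# Worked example (assumed data, for illustration only): with a flattened
# doc {"a.b": 1, "a.c": 2, "_ts": 5, "ns": "db.c", "_id": 1} and
# update_spec {"$set": {"a": 9}}, the loop above pops the dotted keys
# "a.b" and "a.c" before setting doc["a"] = 9, so stale sub-fields never
# survive a whole-subdocument replacement.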
@wrap_exceptions
def update(self, doc, update_spec):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
# Commit outstanding changes so that the document to be updated is the
# same version to which the changes apply.
self.commit()
query = "%s:%s" % (self.unique_key, u(doc['_id']))
results = self.solr.search(query)
if not len(results):
# Document may not be retrievable yet
self.commit()
results = self.solr.search(query)
# Results is an iterable containing only 1 result
for doc in results:
updated = self.apply_update(doc, update_spec)
# A _version_ of 0 will always apply the update
updated['_version_'] = 0
self.upsert(updated)
return updated
@wrap_exceptions
def upsert(self, doc):
"""Update or insert a document into Solr
This method should call whatever add/insert/update method exists for
the backend engine and add the document in there. The input will
always be one mongo document, represented as a Python dictionary.
"""
if self.auto_commit_interval is not None:
self.solr.add([self._clean_doc(doc)],
commit=(self.auto_commit_interval == 0),
commitWithin=u(self.auto_commit_interval))
else:
self.solr.add([self._clean_doc(doc)], commit=False)
@wrap_exceptions
def bulk_upsert(self, docs):
"""Update or insert multiple documents into Solr
docs may be any iterable
"""
if self.auto_commit_interval is not None:
add_kwargs = {
"commit": (self.auto_commit_interval == 0),
"commitWithin": str(self.auto_commit_interval)
}
else:
add_kwargs = {"commit": False}
cleaned = (self._clean_doc(d) for d in docs)
if self.chunk_size > 0:
# itertools.islice keeps the "take up to chunk_size" semantics without
# leaking StopIteration out of a generator expression, which PEP 479
# turned into a RuntimeError on modern Python
batch = list(itertools.islice(cleaned, self.chunk_size))
while batch:
self.solr.add(batch, **add_kwargs)
batch = list(itertools.islice(cleaned, self.chunk_size))
else:
self.solr.add(cleaned, **add_kwargs)
@wrap_exceptions
def remove(self, doc):
"""Removes documents from Solr
The input is a python dictionary that represents a mongo document.
"""
self.solr.delete(id=u(doc["_id"]),
commit=(self.auto_commit_interval == 0))
@wrap_exceptions
def _remove(self):
"""Removes everything
"""
self.solr.delete(q='*:*', commit=(self.auto_commit_interval == 0))
@wrap_exceptions
def _stream_search(self, query):
"""Helper method for iterating over Solr search results."""
for doc in self.solr.search(query, rows=100000000):
if self.unique_key != "_id":
doc["_id"] = doc.pop(self.unique_key)
yield doc
@wrap_exceptions
def search(self, start_ts, end_ts):
"""Called to query Solr for documents in a time range."""
query = '_ts: [%s TO %s]' % (start_ts, end_ts)
return self._stream_search(query)
@wrap_exceptions
def _search(self, query):
"""For test purposes only. Performs search on Solr with given query
Does not have to be implemented.
"""
return self._stream_search(query)
def commit(self):
"""This function is used to force a commit.
"""
retry_until_ok(self.solr.commit)
@wrap_exceptions
def get_last_doc(self):
"""Returns the last document stored in the Solr engine.
"""
# search everything, sort by descending timestamp, return 1 row
try:
result = self.solr.search('*:*', sort='_ts desc', rows=1)
except ValueError:
return None
for r in result:
r['_id'] = r.pop(self.unique_key)
return r
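# Hedged usage sketch (not part of the original module; the Solr URL and
# document values are assumptions for illustration):
#
# dm = DocManager('http://localhost:8983/solr', auto_commit_interval=0)
# dm.upsert({'_id': 1, 'title': 'test', '_ts': 0, 'ns': 'db.coll'})
# dm.commit()
# last = dm.get_last_doc()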
| apache-2.0 |
Srisai85/scipy | scipy/linalg/tests/test_lapack.py | 28 | 17002 | #!/usr/bin/env python
#
# Created by: Pearu Peterson, September 2002
#
from __future__ import division, print_function, absolute_import
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_array_almost_equal, assert_, assert_raises, assert_allclose, \
assert_almost_equal
import numpy as np
from scipy.linalg import _flapack as flapack
from scipy.linalg import inv
from scipy.linalg import svd
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
class TestFlapackSimple(TestCase):
def test_gebal(self):
a = [[1,2,3],[4,5,6],[7,8,9]]
a1 = [[1,0,0,3e-4],
[4,0,0,2e-3],
[7,1,0,0],
[0,1,0,0]]
for p in 'sdzc':
f = getattr(flapack,p+'gebal',None)
if f is None:
continue
ba,lo,hi,pivscale,info = f(a)
assert_(not info,repr(info))
assert_array_almost_equal(ba,a)
assert_equal((lo,hi),(0,len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba,lo,hi,pivscale,info = f(a1,permute=1,scale=1)
assert_(not info,repr(info))
# print a1
# print ba,lo,hi,pivscale
def test_gehrd(self):
a = [[-149, -50,-154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack,p+'gehrd',None)
if f is None:
continue
ht,tau,info = f(a)
assert_(not info,repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1), scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1), scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50,-154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0,0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm, a1)
if norm in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm in 'Mm':
ref = np.max(np.abs(a1))
elif norm in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(TestCase):
def test_flapack(self):
if hasattr(flapack,'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack,'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(TestCase):
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0,2.0],
[4.0,5.0],
[7.0,8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd','gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work,iwork,info = gelsd_lwork(m,n,nrhs,-1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j,2.0],
[4.0+0.5j,5.0-3.0j],
[7.0-2.0j,8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd','gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m,n,nrhs,-1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0,2.0],
[4.0,5.0],
[7.0,8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss','gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work,info = gelss_lwork(m,n,nrhs,-1)
lwork = int(np.real(work))
v,x,s,rank,work,info = gelss(a1, b1,-1,lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j,2.0],
[4.0+0.5j,5.0-3.0j],
[7.0-2.0j,8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss','gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work,info = gelss_lwork(m,n,nrhs,-1)
lwork = int(np.real(work))
v,x,s,rank,work,info = gelss(a1, b1,-1,lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0,2.0],
[4.0,5.0],
[7.0,8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy','gelsy_lwork'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m,n,nrhs,10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1],1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j,2.0],
[4.0+0.5j,5.0-3.0j],
[7.0-2.0j,8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy','gelsy_lwork'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m,n,nrhs,10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1],1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
class TestRegression(TestCase):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(TestCase):
def test_gh_2691(self):
# 'lower' argument of dportf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4(TestCase):
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1,len(m_vec) - 1)))), m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], (sigmas[0] +
it_len*np.sqrt(np.sum(np.power(m_vec,2))),)))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4',(sigmas,))
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
assert_(res[3] <= 0, "LAPACK root finding dlasd4 failed to find "
"the singular value %i" % i)
roots = np.array(roots)[::-1]
assert_(not np.any(np.isnan(roots)), "There are NaN roots")
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
def test_lartg():
for dtype in 'fdFD':
lartg = get_lapack_funcs('lartg', dtype=dtype)
f = np.array(3, dtype)
g = np.array(4, dtype)
if np.iscomplexobj(g):
g *= 1j
cs, sn, r = lartg(f, g)
assert_allclose(cs, 3.0/5.0)
assert_allclose(r, 5.0)
if np.iscomplexobj(g):
assert_allclose(sn, -4.0j/5.0)
assert_(type(r) == complex)
assert_(type(cs) == float)
else:
assert_allclose(sn, 4.0/5.0)
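# Background note (added for clarity): lartg builds a Givens rotation with
# cs*f + sn*g = r and -conj(sn)*f + cs*g = 0. For f=3, g=4 that gives
# (cs, sn, r) = (0.6, 0.8, 5.0); in the complex case g=4j, sn carries the
# conjugate (sn = conj(g)/r = -0.8j), which is why -4.0j/5.0 is expected.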
def test_rot():
# srot, drot from blas and crot and zrot from lapack.
for dtype in 'fdFD':
c = 0.6
s = 0.8
u = np.ones(4, dtype) * 3
v = np.ones(4, dtype) * 4
atol = 10**-(np.finfo(dtype).precision-1)
if dtype in 'fd':
rot = get_blas_funcs('rot', dtype=dtype)
f = 4
else:
rot = get_lapack_funcs('rot', dtype=dtype)
s *= -1j
v *= 1j
f = 4j
assert_allclose(rot(u, v, c, s), [[5,5,5,5],[0,0,0,0]], atol=atol)
assert_allclose(rot(u, v, c, s, n=2), [[5,5,3,3],[0,0,f,f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2,offy=2), [[3,3,5,5],[f,f,0,0]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2), [[5,3,5,3],[f,f,0,0]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2), [[3,3,5,5],[0,f,0,f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1), [[3,3,5,3],[f,f,0,f]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2), [[5,3,5,3],[0,f,0,f]], atol=atol)
a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
assert_(a is u)
assert_(b is v)
assert_allclose(a, [5,5,5,5], atol=atol)
assert_allclose(b, [0,0,0,0], atol=atol)
def test_larfg_larf():
np.random.seed(1234)
a0 = np.random.random((4,4))
a0 = a0.T.dot(a0)
a0j = np.random.random((4,4)) + 1j*np.random.random((4,4))
a0j = a0j.T.conj().dot(a0j)
# our test here will be to do one step of reducing a Hermitian matrix to
# tridiagonal form using Householder transforms.
for dtype in 'fdFD':
larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
if dtype in 'FD':
a = a0j.copy()
else:
a = a0.copy()
# generate a householder transform to clear a[2:,0]
alpha, x, tau = larfg(a.shape[0]-1, a[1,0], a[2:,0])
# create expected output
expected = np.zeros_like(a[:,0])
expected[0] = a[0,0]
expected[1] = alpha
# assemble householder vector
v = np.zeros_like(a[1:,0])
v[0] = 1.0
v[1:] = x
# apply transform from the left
a[1:,:] = larf(v, tau.conjugate(), a[1:,:], np.zeros(a.shape[1]))
# apply transform from the right
a[:,1:] = larf(v, tau, a[:,1:], np.zeros(a.shape[0]), side='R')
assert_allclose(a[:,0], expected, atol=1e-5)
assert_allclose(a[0,:], expected, atol=1e-5)
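# Background note (added for clarity): larfg returns (alpha, x, tau) defining
# an elementary reflector H = I - tau * v * v^H with v = [1, x]; applying H
# from the left, and its counterpart from the right, zeroes a[2:, 0] and
# a[0, 2:], which is exactly one step of the tridiagonal reduction above.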
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
edx/edx-analytics-dashboard | analytics_dashboard/learner_analytics_api/v0/views.py | 1 | 6267 | from requests.exceptions import ConnectTimeout
from rest_framework.exceptions import PermissionDenied
from rest_framework.generics import RetrieveAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .clients import LearnerAPIClient
from .permissions import HasCourseAccessPermission
from .renderers import TextRenderer
# TODO: Consider caching responses from the data api when working on AN-6157
class BaseLearnerApiView(RetrieveAPIView):
permission_classes = (IsAuthenticated, HasCourseAccessPermission,)
# Serialize the Learner Analytics API response to JSON, by default.
serializer_type = 'json'
# Do not return the HTTP headers from the Data API, by default.
# This will be further investigated in AN-6928.
include_headers = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = LearnerAPIClient(serializer_type=self.serializer_type)
def get_queryset(self):
"""
DRF requires that we override this method. Since we don't actually use
querysets/django models in this API, this method doesn't have to return
anything.
"""
@property
def course_id(self):
"""
Gets the course_id either from the URL or the querystring parameters.
"""
course_id = getattr(self.request, 'course_id', None)
if not course_id:
course_id = self.request.query_params.get('course_id')
return course_id
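# Resolution-order illustration (hypothetical values): for a request routed
# with course_id='course-v1:edX+DemoX+2024' the request attribute wins;
# otherwise a querystring such as ?course_id=... is consulted, and None is
# returned when neither is present.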
def get(self, request, *args, **kwargs):
"""
Return the response from the Data API.
"""
api_response = self.get_api_response(request, *args, **kwargs)
response_kwargs = dict(
data=api_response.serialized_content,
status=api_response.status_code,
)
if self.include_headers:
response_kwargs['headers'] = api_response.headers
return Response(**response_kwargs)
def get_api_response(self, request, *args, **kwargs):
"""
Fetch the response from the API.
Must be implemented by subclasses.
"""
raise NotImplementedError('Override this method to return the Learner Analytics API response for this view.')
def handle_exception(self, exc):
"""
Handles timeouts raised by the API client by returning an HTTP
504.
"""
if isinstance(exc, ConnectTimeout):
return Response(
data={'developer_message': 'Learner Analytics API timed out.', 'error_code': 'analytics_api_timeout'},
status=504
)
return super().handle_exception(exc)
class DownloadLearnerApiViewMixin:
"""
Requests text/csv data from the Learner Analytics API, and ensures that the REST framework returns it unparsed,
including the response headers.
"""
include_headers = True
content_type = 'text/csv'
serializer_type = 'text'
def get_api_response(self, request, **kwargs):
"""
Sets the HTTP Accept header on the request to tell the Learner Analytics API
which format to return its data in, and tells the REST framework to render
as text. NB: the parent class must also define get_api_response().
"""
request.META['Accept'] = self.content_type
request.accepted_renderer = TextRenderer()
return super().get_api_response(request, **kwargs)
class NotFoundLearnerApiViewMixin:
"""
Returns 404s rather than 403s when PermissionDenied exceptions are raised.
"""
@property
def not_found_developer_message(self):
raise NotImplementedError('Override this attribute to define the developer message returned with 404s.')
@property
def not_found_error_code(self):
raise NotImplementedError('Override this attribute to define the error_code string returned with 404s.')
def handle_exception(self, exc):
if isinstance(exc, PermissionDenied):
return Response(
data={'developer_message': self.not_found_developer_message, 'error_code': self.not_found_error_code},
status=404
)
return super().handle_exception(exc)
class LearnerDetailView(NotFoundLearnerApiViewMixin, BaseLearnerApiView):
"""
Forwards requests to the Learner Analytics API's Learner Detail endpoint.
"""
not_found_error_code = 'no_learner_for_course'
@property
def not_found_developer_message(self):
message = 'Learner {} not found'.format(self.kwargs.get('username', ''))
message += f' for course {self.course_id}.' if self.course_id else '.'
return message
def get_api_response(self, request, username, **kwargs):
return self.client.learners(username).get(**request.query_params)
class LearnerListView(BaseLearnerApiView):
"""
Forwards requests to the Learner Analytics API's Learner List endpoint.
"""
def get_api_response(self, request, **kwargs):
return self.client.learners.get(**request.query_params)
class LearnerListCSV(DownloadLearnerApiViewMixin, LearnerListView):
"""
Forwards text/csv requests to the Learner Analytics API's Learner List endpoint,
and returns a simple text response.
"""
class EngagementTimelinesView(NotFoundLearnerApiViewMixin, BaseLearnerApiView):
"""
Forwards requests to the Learner Analytics API's Engagement Timeline
endpoint.
"""
not_found_error_code = 'no_learner_engagement_timeline'
@property
def not_found_developer_message(self):
message = 'Learner {} engagement timeline not found'.format(self.kwargs.get('username', ''))
message += f' for course {self.course_id}.' if self.course_id else '.'
return message
def get_api_response(self, request, username, **kwargs):
return self.client.engagement_timelines(username).get(**request.query_params)
class CourseLearnerMetadataView(BaseLearnerApiView):
"""
Forwards requests to the Learner Analytics API's Course Metadata endpoint.
"""
def get_api_response(self, request, course_id, **kwargs):
return self.client.course_learner_metadata(course_id).get(**request.query_params)
| agpl-3.0 |
schlueter/ansible | lib/ansible/modules/commands/telnet.py | 33 | 2576 | # this is a virtual module that is entirely implemented server side
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: telnet
short_description: Executes a low-down and dirty telnet command
version_added: 2.4
description:
- Executes a low-down and dirty telnet command, not going through the module subsystem.
- This is mostly to be used for enabling ssh on devices that only have telnet enabled by default.
options:
command:
description:
- List of commands to be executed in the telnet session.
required: True
aliases: ['commands']
host:
description:
- The host/target on which to execute the command
required: False
default: remote_addr
user:
description:
- The user for login
required: False
default: remote_user
password:
description:
- The password for login
port:
description:
- Remote port to use
default: 23
timeout:
description:
- Timeout for remote operations
default: 120
prompts:
description:
- List of prompts expected before sending next command
required: False
default: ['$']
login_prompt:
description:
- Login or username prompt to expect
required: False
default: 'login: '
password_prompt:
description:
- Password prompt to expect
required: False
default: 'Password: '
pause:
description:
- Seconds to pause between each command issued
required: False
default: 1
notes:
- The C(environment) keyword does not work with this task
author:
- Ansible Core Team
'''
EXAMPLES = '''
- name: send configuration commands to IOS
telnet:
user: cisco
password: cisco
login_prompt: "Username: "
prompts:
- "[>|#]"
command:
- terminal length 0
- configure terminal
- hostname ios01
- name: run show commands
telnet:
user: cisco
password: cisco
login_prompt: "Username: "
prompts:
- "[>|#]"
command:
- terminal length 0
- show version
'''
RETURN = '''
output:
description: output of each command is an element in this list
type: list
returned: always
sample: [ 'success', 'success', '', 'warning .. something' ]
'''
| gpl-3.0 |
wshallum/ansible | lib/ansible/modules/network/netvisor/pn_vrouterbgp.py | 29 | 15078 | #!/usr/bin/python
""" PN-CLI vrouter-bgp-add/vrouter-bgp-remove/vrouter-bgp-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: pn_vrouterbgp
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1.0
short_description: CLI command to add/remove/modify vrouter-bgp.
description:
- Execute vrouter-bgp-add, vrouter-bgp-remove, vrouter-bgp-modify command.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a vRouter service that forwards traffic between
networks and implements Layer 4 protocols.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to add bgp,
'absent' to remove bgp and 'update' to modify bgp.
required: True
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify a name for the vRouter service.
required: True
pn_neighbor:
description:
- Specify a neighbor IP address to use for BGP.
- Required for vrouter-bgp-add.
pn_remote_as:
description:
- Specify the remote Autonomous System(AS) number. This value is between
1 and 4294967295.
- Required for vrouter-bgp-add.
pn_next_hop_self:
description:
- Specify if the next-hop is the same router or not.
pn_password:
description:
- Specify a password, if desired.
pn_ebgp:
description:
- Specify a value for external BGP to accept or attempt BGP connections
to external peers, not directly connected, on the network. This is a
value between 1 and 255.
pn_prefix_listin:
description:
- Specify the prefix list to filter traffic inbound.
pn_prefix_listout:
description:
- Specify the prefix list to filter traffic outbound.
pn_route_reflector:
description:
- Specify if a route reflector client is used.
pn_override_capability:
description:
- Specify if you want to override capability.
pn_soft_reconfig:
description:
- Specify if you want a soft reconfiguration of inbound traffic.
pn_max_prefix:
description:
- Specify the maximum number of prefixes.
pn_max_prefix_warn:
description:
- Specify if you want a warning message when the maximum number of
prefixes is exceeded.
pn_bfd:
description:
- Specify if you want BFD protocol support for fault detection.
pn_multiprotocol:
description:
- Specify a multi-protocol for BGP.
choices: ['ipv4-unicast', 'ipv6-unicast']
pn_weight:
description:
- Specify a default weight value between 0 and 65535 for the neighbor
routes.
pn_default_originate:
description:
- Specify if you want announce default routes to the neighbor or not.
pn_keepalive:
description:
- Specify BGP neighbor keepalive interval in seconds.
pn_holdtime:
description:
- Specify BGP neighbor holdtime in seconds.
pn_route_mapin:
description:
- Specify inbound route map for neighbor.
pn_route_mapout:
description:
- Specify outbound route map for neighbor.
"""
EXAMPLES = """
- name: add vrouter-bgp
pn_vrouterbgp:
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_neighbor: 104.104.104.1
pn_remote_as: 1800
- name: remove vrouter-bgp
pn_vrouterbgp:
state: 'absent'
pn_vrouter_name: 'ansible-vrouter'
pn_neighbor: 104.104.104.1
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
description: The set of responses from the vrouterbgp command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouterbgp command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
VROUTER_EXISTS = None
NEIGHBOR_EXISTS = None
def pn_cli(module):
"""
This method generates the initial CLI string used to launch the Netvisor CLI.
It parses the username, password and switch parameters from the module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
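# Worked example (assumed credentials, for illustration only): with
# pn_cliusername='network-admin', pn_clipassword='secret' and the default
# pn_cliswitch='local', pn_cli() returns
# '/usr/bin/cli --quiet --user network-admin:secret  switch-local '.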
def check_cli(module, cli):
"""
This method checks if vRouter exists on the target node.
This method also checks for idempotency using the vrouter-bgp-show command.
If the given vRouter exists, return VROUTER_EXISTS as True else False.
If a BGP neighbor with the given ip exists on the given vRouter,
return NEIGHBOR_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS
"""
vrouter_name = module.params['pn_vrouter_name']
neighbor = module.params['pn_neighbor']
# Global flags
global VROUTER_EXISTS, NEIGHBOR_EXISTS
# Check for vRouter
check_vrouter = cli + ' vrouter-show format name no-show-headers '
check_vrouter = shlex.split(check_vrouter)
out = module.run_command(check_vrouter)[1]
out = out.split()
if vrouter_name in out:
VROUTER_EXISTS = True
else:
VROUTER_EXISTS = False
# Check for BGP neighbors
show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
show += 'format neighbor no-show-headers'
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
if neighbor in out:
NEIGHBOR_EXISTS = True
else:
NEIGHBOR_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-bgp-add'
if state == 'absent':
command = 'vrouter-bgp-remove'
if state == 'update':
command = 'vrouter-bgp-modify'
return command
def main():
""" This portion is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_vrouter_name=dict(required=True, type='str'),
pn_neighbor=dict(type='str'),
pn_remote_as=dict(type='str'),
pn_next_hop_self=dict(type='bool'),
pn_password=dict(type='str', no_log=True),
pn_ebgp=dict(type='int'),
pn_prefix_listin=dict(type='str'),
pn_prefix_listout=dict(type='str'),
pn_route_reflector=dict(type='bool'),
pn_override_capability=dict(type='bool'),
pn_soft_reconfig=dict(type='bool'),
pn_max_prefix=dict(type='int'),
pn_max_prefix_warn=dict(type='bool'),
pn_bfd=dict(type='bool'),
pn_multiprotocol=dict(type='str',
choices=['ipv4-unicast', 'ipv6-unicast']),
pn_weight=dict(type='int'),
pn_default_originate=dict(type='bool'),
pn_keepalive=dict(type='str'),
pn_holdtime=dict(type='str'),
pn_route_mapin=dict(type='str'),
pn_route_mapout=dict(type='str')
),
required_if=(
["state", "present",
["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]],
["state", "absent",
["pn_vrouter_name", "pn_neighbor"]],
["state", "update",
["pn_vrouter_name", "pn_neighbor"]]
)
)
# Accessing the arguments
state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
neighbor = module.params['pn_neighbor']
remote_as = module.params['pn_remote_as']
next_hop_self = module.params['pn_next_hop_self']
password = module.params['pn_password']
ebgp = module.params['pn_ebgp']
prefix_listin = module.params['pn_prefix_listin']
prefix_listout = module.params['pn_prefix_listout']
route_reflector = module.params['pn_route_reflector']
override_capability = module.params['pn_override_capability']
soft_reconfig = module.params['pn_soft_reconfig']
max_prefix = module.params['pn_max_prefix']
max_prefix_warn = module.params['pn_max_prefix_warn']
bfd = module.params['pn_bfd']
multiprotocol = module.params['pn_multiprotocol']
weight = module.params['pn_weight']
default_originate = module.params['pn_default_originate']
keepalive = module.params['pn_keepalive']
holdtime = module.params['pn_holdtime']
route_mapin = module.params['pn_route_mapin']
route_mapout = module.params['pn_route_mapout']
# Building the CLI command string
cli = pn_cli(module)
command = get_command_from_state(state)
if command == 'vrouter-bgp-remove':
check_cli(module, cli)
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NEIGHBOR_EXISTS is False:
module.exit_json(
skipped=True,
msg=('BGP neighbor with IP %s does not exist on %s'
% (neighbor, vrouter_name))
)
cli += (' %s vrouter-name %s neighbor %s '
% (command, vrouter_name, neighbor))
else:
if command == 'vrouter-bgp-add':
check_cli(module, cli)
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NEIGHBOR_EXISTS is True:
module.exit_json(
skipped=True,
msg=('BGP neighbor with IP %s already exists on %s'
% (neighbor, vrouter_name))
)
cli += (' %s vrouter-name %s neighbor %s '
% (command, vrouter_name, neighbor))
if remote_as:
cli += ' remote-as ' + str(remote_as)
if next_hop_self is True:
cli += ' next-hop-self '
if next_hop_self is False:
cli += ' no-next-hop-self '
if password:
cli += ' password ' + password
if ebgp:
cli += ' ebgp-multihop ' + str(ebgp)
if prefix_listin:
cli += ' prefix-list-in ' + prefix_listin
if prefix_listout:
cli += ' prefix-list-out ' + prefix_listout
if route_reflector is True:
cli += ' route-reflector-client '
if route_reflector is False:
cli += ' no-route-reflector-client '
if override_capability is True:
cli += ' override-capability '
if override_capability is False:
cli += ' no-override-capability '
if soft_reconfig is True:
cli += ' soft-reconfig-inbound '
if soft_reconfig is False:
cli += ' no-soft-reconfig-inbound '
if max_prefix:
cli += ' max-prefix ' + str(max_prefix)
if max_prefix_warn is True:
cli += ' max-prefix-warn-only '
if max_prefix_warn is False:
cli += ' no-max-prefix-warn-only '
if bfd is True:
cli += ' bfd '
if bfd is False:
cli += ' no-bfd '
if multiprotocol:
cli += ' multi-protocol ' + multiprotocol
if weight:
cli += ' weight ' + str(weight)
if default_originate is True:
cli += ' default-originate '
if default_originate is False:
cli += ' no-default-originate '
if keepalive:
cli += ' neighbor-keepalive-interval ' + keepalive
if holdtime:
cli += ' neighbor-holdtime ' + holdtime
if route_mapin:
cli += ' route-map-in ' + route_mapin
if route_mapout:
cli += ' route-map-out ' + route_mapout
run_cli(module, cli)
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
therandomcode/Fanalytics | lib/oauth2client/service_account.py | 52 | 5038 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A service account credentials class.
This credentials class is implemented on top of the rsa library.
"""
import base64
import time
from pyasn1.codec.ber import decoder
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
from oauth2client import GOOGLE_REVOKE_URI
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client._helpers import _json_encode
from oauth2client._helpers import _to_bytes
from oauth2client._helpers import _urlsafe_b64encode
from oauth2client import util
from oauth2client.client import AssertionCredentials
class _ServiceAccountCredentials(AssertionCredentials):
"""Class representing a service account (signed JWT) credential."""
MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
def __init__(self, service_account_id, service_account_email,
private_key_id, private_key_pkcs8_text, scopes,
user_agent=None, token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI, **kwargs):
super(_ServiceAccountCredentials, self).__init__(
None, user_agent=user_agent, token_uri=token_uri,
revoke_uri=revoke_uri)
self._service_account_id = service_account_id
self._service_account_email = service_account_email
self._private_key_id = private_key_id
self._private_key = _get_private_key(private_key_pkcs8_text)
self._private_key_pkcs8_text = private_key_pkcs8_text
self._scopes = util.scopes_to_string(scopes)
self._user_agent = user_agent
self._token_uri = token_uri
self._revoke_uri = revoke_uri
self._kwargs = kwargs
def _generate_assertion(self):
"""Generate the assertion that will be used in the request."""
header = {
'alg': 'RS256',
'typ': 'JWT',
'kid': self._private_key_id
}
now = int(time.time())
payload = {
'aud': self._token_uri,
'scope': self._scopes,
'iat': now,
'exp': now + _ServiceAccountCredentials.MAX_TOKEN_LIFETIME_SECS,
'iss': self._service_account_email
}
payload.update(self._kwargs)
first_segment = _urlsafe_b64encode(_json_encode(header))
second_segment = _urlsafe_b64encode(_json_encode(payload))
assertion_input = first_segment + b'.' + second_segment
# Sign the assertion.
rsa_bytes = rsa.pkcs1.sign(assertion_input, self._private_key,
'SHA-256')
signature = base64.urlsafe_b64encode(rsa_bytes).rstrip(b'=')
return assertion_input + b'.' + signature
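# Structure note (added for clarity): the returned assertion is a standard
# three-segment JWT, base64url(header) + '.' + base64url(payload) + '.' +
# base64url(signature), where the signature is RSASSA-PKCS1-v1_5 over
# SHA-256 and is verified by the token endpoint against the service
# account's public key.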
def sign_blob(self, blob):
# Ensure that it is bytes
blob = _to_bytes(blob, encoding='utf-8')
return (self._private_key_id,
rsa.pkcs1.sign(blob, self._private_key, 'SHA-256'))
@property
def service_account_email(self):
return self._service_account_email
@property
def serialization_data(self):
return {
'type': 'service_account',
'client_id': self._service_account_id,
'client_email': self._service_account_email,
'private_key_id': self._private_key_id,
'private_key': self._private_key_pkcs8_text
}
def create_scoped_required(self):
return not self._scopes
def create_scoped(self, scopes):
return _ServiceAccountCredentials(self._service_account_id,
self._service_account_email,
self._private_key_id,
self._private_key_pkcs8_text,
scopes,
user_agent=self._user_agent,
token_uri=self._token_uri,
revoke_uri=self._revoke_uri,
**self._kwargs)
def _get_private_key(private_key_pkcs8_text):
"""Get an RSA private key object from a pkcs8 representation."""
private_key_pkcs8_text = _to_bytes(private_key_pkcs8_text)
der = rsa.pem.load_pem(private_key_pkcs8_text, 'PRIVATE KEY')
asn1_private_key, _ = decoder.decode(der, asn1Spec=PrivateKeyInfo())
return rsa.PrivateKey.load_pkcs1(
asn1_private_key.getComponentByName('privateKey').asOctets(),
format='DER')
| apache-2.0 |
donald-pinckney/EM-Simulator | EM Sim/EM SimContent/Lib/lib2to3/pgen2/token.py | 353 | 1244 | #! /usr/bin/env python
"""Token constants (from "token.h")."""
# Taken from Python (r53757) and modified to include some tokens
# originally monkeypatched in by pgen2.tokenize
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
COMMENT = 52
NL = 53
RARROW = 54
ERRORTOKEN = 55
N_TOKENS = 56
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
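# Usage sketch (added for illustration):
#
# assert tok_name[NAME] == 'NAME'
# assert ISTERMINAL(NUMBER) and not ISNONTERMINAL(NUMBER)
# assert ISEOF(ENDMARKER)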
| apache-2.0 |
wbinventor/openmc | openmc/data/kalbach_mann.py | 1 | 14224 | from collections.abc import Iterable
from numbers import Real, Integral
from warnings import warn
import numpy as np
import openmc.checkvalue as cv
from openmc.stats import Tabular, Univariate, Discrete, Mixture
from .function import Tabulated1D, INTERPOLATION_SCHEME
from .angle_energy import AngleEnergy
from .data import EV_PER_MEV
from .endf import get_list_record, get_tab2_record
class KalbachMann(AngleEnergy):
"""Kalbach-Mann distribution
Parameters
----------
breakpoints : Iterable of int
Breakpoints defining interpolation regions
interpolation : Iterable of int
Interpolation codes
energy : Iterable of float
Incoming energies at which distributions exist
energy_out : Iterable of openmc.stats.Univariate
Distribution of outgoing energies corresponding to each incoming energy
precompound : Iterable of openmc.data.Tabulated1D
Precompound factor 'r' as a function of outgoing energy for each
incoming energy
slope : Iterable of openmc.data.Tabulated1D
Kalbach-Chadwick angular distribution slope value 'a' as a function of
outgoing energy for each incoming energy
Attributes
----------
breakpoints : Iterable of int
Breakpoints defining interpolation regions
interpolation : Iterable of int
Interpolation codes
energy : Iterable of float
Incoming energies at which distributions exist
energy_out : Iterable of openmc.stats.Univariate
Distribution of outgoing energies corresponding to each incoming energy
precompound : Iterable of openmc.data.Tabulated1D
Precompound factor 'r' as a function of outgoing energy for each
incoming energy
slope : Iterable of openmc.data.Tabulated1D
Kalbach-Chadwick angular distribution slope value 'a' as a function of
outgoing energy for each incoming energy
"""
def __init__(self, breakpoints, interpolation, energy, energy_out,
precompound, slope):
super().__init__()
self.breakpoints = breakpoints
self.interpolation = interpolation
self.energy = energy
self.energy_out = energy_out
self.precompound = precompound
self.slope = slope
@property
def breakpoints(self):
return self._breakpoints
@property
def interpolation(self):
return self._interpolation
@property
def energy(self):
return self._energy
@property
def energy_out(self):
return self._energy_out
@property
def precompound(self):
return self._precompound
@property
def slope(self):
return self._slope
@breakpoints.setter
def breakpoints(self, breakpoints):
cv.check_type('Kalbach-Mann breakpoints', breakpoints,
Iterable, Integral)
self._breakpoints = breakpoints
@interpolation.setter
def interpolation(self, interpolation):
cv.check_type('Kalbach-Mann interpolation', interpolation,
Iterable, Integral)
self._interpolation = interpolation
@energy.setter
def energy(self, energy):
cv.check_type('Kalbach-Mann incoming energy', energy,
Iterable, Real)
self._energy = energy
@energy_out.setter
def energy_out(self, energy_out):
cv.check_type('Kalbach-Mann distributions', energy_out,
Iterable, Univariate)
self._energy_out = energy_out
@precompound.setter
def precompound(self, precompound):
cv.check_type('Kalbach-Mann precompound factor', precompound,
Iterable, Tabulated1D)
self._precompound = precompound
@slope.setter
def slope(self, slope):
cv.check_type('Kalbach-Mann slope', slope, Iterable, Tabulated1D)
self._slope = slope
def to_hdf5(self, group):
"""Write distribution to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
"""
group.attrs['type'] = np.string_('kalbach-mann')
dset = group.create_dataset('energy', data=self.energy)
dset.attrs['interpolation'] = np.vstack((self.breakpoints,
self.interpolation))
# Determine total number of (E,p,r,a) tuples and create array
n_tuple = sum(len(d) for d in self.energy_out)
distribution = np.empty((5, n_tuple))
# Create array for offsets
offsets = np.empty(len(self.energy_out), dtype=int)
interpolation = np.empty(len(self.energy_out), dtype=int)
n_discrete_lines = np.empty(len(self.energy_out), dtype=int)
j = 0
# Populate offsets and distribution array
for i, (eout, km_r, km_a) in enumerate(zip(
self.energy_out, self.precompound, self.slope)):
n = len(eout)
offsets[i] = j
if isinstance(eout, Mixture):
discrete, continuous = eout.distribution
n_discrete_lines[i] = m = len(discrete)
interpolation[i] = 1 if continuous.interpolation == 'histogram' else 2
distribution[0, j:j+m] = discrete.x
distribution[1, j:j+m] = discrete.p
distribution[2, j:j+m] = discrete.c
distribution[0, j+m:j+n] = continuous.x
distribution[1, j+m:j+n] = continuous.p
distribution[2, j+m:j+n] = continuous.c
else:
if isinstance(eout, Tabular):
n_discrete_lines[i] = 0
interpolation[i] = 1 if eout.interpolation == 'histogram' else 2
elif isinstance(eout, Discrete):
n_discrete_lines[i] = n
interpolation[i] = 1
distribution[0, j:j+n] = eout.x
distribution[1, j:j+n] = eout.p
distribution[2, j:j+n] = eout.c
distribution[3, j:j+n] = km_r.y
distribution[4, j:j+n] = km_a.y
j += n
# Create dataset for distributions
dset = group.create_dataset('distribution', data=distribution)
# Write interpolation as attribute
dset.attrs['offsets'] = offsets
dset.attrs['interpolation'] = interpolation
dset.attrs['n_discrete_lines'] = n_discrete_lines
@classmethod
def from_hdf5(cls, group):
"""Generate Kalbach-Mann distribution from HDF5 data
Parameters
----------
group : h5py.Group
HDF5 group to read from
Returns
-------
openmc.data.KalbachMann
Kalbach-Mann energy distribution
"""
interp_data = group['energy'].attrs['interpolation']
energy_breakpoints = interp_data[0, :]
energy_interpolation = interp_data[1, :]
energy = group['energy'][()]
data = group['distribution']
offsets = data.attrs['offsets']
interpolation = data.attrs['interpolation']
n_discrete_lines = data.attrs['n_discrete_lines']
energy_out = []
precompound = []
slope = []
n_energy = len(energy)
for i in range(n_energy):
# Determine length of outgoing energy distribution and number of
# discrete lines
j = offsets[i]
if i < n_energy - 1:
n = offsets[i+1] - j
else:
n = data.shape[1] - j
m = n_discrete_lines[i]
# Create discrete distribution if lines are present
if m > 0:
eout_discrete = Discrete(data[0, j:j+m], data[1, j:j+m])
eout_discrete.c = data[2, j:j+m]
p_discrete = eout_discrete.c[-1]
# Create continuous distribution
if m < n:
interp = INTERPOLATION_SCHEME[interpolation[i]]
eout_continuous = Tabular(data[0, j+m:j+n], data[1, j+m:j+n], interp)
eout_continuous.c = data[2, j+m:j+n]
# If both continuous and discrete are present, create a mixture
# distribution
if m == 0:
eout_i = eout_continuous
elif m == n:
eout_i = eout_discrete
else:
eout_i = Mixture([p_discrete, 1. - p_discrete],
[eout_discrete, eout_continuous])
km_r = Tabulated1D(data[0, j:j+n], data[3, j:j+n])
km_a = Tabulated1D(data[0, j:j+n], data[4, j:j+n])
energy_out.append(eout_i)
precompound.append(km_r)
slope.append(km_a)
return cls(energy_breakpoints, energy_interpolation,
energy, energy_out, precompound, slope)
@classmethod
def from_ace(cls, ace, idx, ldis):
"""Generate Kalbach-Mann energy-angle distribution from ACE data
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
idx : int
Index in XSS array of the start of the energy distribution data
(LDIS + LOCC - 1)
ldis : int
Index in XSS array of the start of the energy distribution block
(e.g. JXS[11])
Returns
-------
openmc.data.KalbachMann
Kalbach-Mann energy-angle distribution
"""
# Read number of interpolation regions and incoming energies
n_regions = int(ace.xss[idx])
n_energy_in = int(ace.xss[idx + 1 + 2*n_regions])
# Get interpolation information
idx += 1
if n_regions > 0:
breakpoints = ace.xss[idx:idx + n_regions].astype(int)
interpolation = ace.xss[idx + n_regions:idx + 2*n_regions].astype(int)
else:
breakpoints = np.array([n_energy_in])
interpolation = np.array([2])
# Incoming energies at which distributions exist
idx += 2*n_regions + 1
energy = ace.xss[idx:idx + n_energy_in]*EV_PER_MEV
# Location of distributions
idx += n_energy_in
loc_dist = ace.xss[idx:idx + n_energy_in].astype(int)
# Initialize variables
energy_out = []
km_r = []
km_a = []
# Read each outgoing energy distribution
for i in range(n_energy_in):
idx = ldis + loc_dist[i] - 1
# intt = interpolation scheme (1=hist, 2=lin-lin)
INTTp = int(ace.xss[idx])
intt = INTTp % 10
n_discrete_lines = (INTTp - intt)//10
if intt not in (1, 2):
warn("Interpolation scheme for continuous tabular distribution "
"is not histogram or linear-linear.")
intt = 2
n_energy_out = int(ace.xss[idx + 1])
data = ace.xss[idx + 2:idx + 2 + 5*n_energy_out].copy()
data.shape = (5, n_energy_out)
data[0,:] *= EV_PER_MEV
# Create continuous distribution
eout_continuous = Tabular(data[0][n_discrete_lines:],
data[1][n_discrete_lines:]/EV_PER_MEV,
INTERPOLATION_SCHEME[intt],
ignore_negative=True)
eout_continuous.c = data[2][n_discrete_lines:]
if np.any(data[1][n_discrete_lines:] < 0.0):
warn("Kalbach-Mann energy distribution has negative "
"probabilities.")
# If discrete lines are present, create a mixture distribution
if n_discrete_lines > 0:
eout_discrete = Discrete(data[0][:n_discrete_lines],
data[1][:n_discrete_lines])
eout_discrete.c = data[2][:n_discrete_lines]
if n_discrete_lines == n_energy_out:
eout_i = eout_discrete
else:
p_discrete = min(sum(eout_discrete.p), 1.0)
eout_i = Mixture([p_discrete, 1. - p_discrete],
[eout_discrete, eout_continuous])
else:
eout_i = eout_continuous
energy_out.append(eout_i)
km_r.append(Tabulated1D(data[0], data[3]))
km_a.append(Tabulated1D(data[0], data[4]))
return cls(breakpoints, interpolation, energy, energy_out, km_r, km_a)
@classmethod
def from_endf(cls, file_obj):
"""Generate Kalbach-Mann distribution from an ENDF evaluation
Parameters
----------
file_obj : file-like object
ENDF file positioned at the start of the Kalbach-Mann distribution
Returns
-------
openmc.data.KalbachMann
Kalbach-Mann energy-angle distribution
"""
params, tab2 = get_tab2_record(file_obj)
lep = params[3]
ne = params[5]
energy = np.zeros(ne)
n_discrete_energies = np.zeros(ne, dtype=int)
energy_out = []
precompound = []
slope = []
for i in range(ne):
items, values = get_list_record(file_obj)
energy[i] = items[1]
n_discrete_energies[i] = items[2]
# TODO: split out discrete energies
n_angle = items[3]
n_energy_out = items[5]
values = np.asarray(values)
values.shape = (n_energy_out, n_angle + 2)
# Outgoing energy distribution at the i-th incoming energy
eout_i = values[:,0]
eout_p_i = values[:,1]
energy_out_i = Tabular(eout_i, eout_p_i, INTERPOLATION_SCHEME[lep])
energy_out.append(energy_out_i)
# Precompound and slope factors for Kalbach-Mann
r_i = values[:,2]
if n_angle == 2:
a_i = values[:,3]
else:
a_i = np.zeros_like(r_i)
precompound.append(Tabulated1D(eout_i, r_i))
slope.append(Tabulated1D(eout_i, a_i))
return cls(tab2.breakpoints, tab2.interpolation, energy,
energy_out, precompound, slope)
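# Illustrative sketch (not part of the original class): reading a
# Kalbach-Mann distribution back from an HDF5 group written by the
# to_hdf5 method above. The file and group names are hypothetical.
#
#     import h5py
#     with h5py.File('neutron_data.h5', 'r') as f:
#         km = KalbachMann.from_hdf5(f['reactions/energy_distribution'])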
| mit |
bclau/nova | nova/tests/integrated/test_servers.py | 8 | 19688 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
import zlib
from nova import context
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.tests import fake_network
from nova.tests.integrated.api import client
from nova.tests.integrated import integrated_helpers
import nova.virt.fake
LOG = logging.getLogger(__name__)
class ServersTest(integrated_helpers._IntegratedTestBase):
_api_version = 'v2'
_force_delete_parameter = 'forceDelete'
_image_ref_parameter = 'imageRef'
_flavor_ref_parameter = 'flavorRef'
_access_ipv4_parameter = 'accessIPv4'
_access_ipv6_parameter = 'accessIPv6'
_return_resv_id_parameter = 'return_reservation_id'
_min_count_parameter = 'min_count'
def setUp(self):
super(ServersTest, self).setUp()
self.conductor = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
def _wait_for_state_change(self, server, from_status):
for i in xrange(0, 50):
server = self.api.get_server(server['id'])
if server['status'] != from_status:
break
time.sleep(.1)
return server
def _restart_compute_service(self, *args, **kwargs):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
self.compute = self.start_service('compute', *args, **kwargs)
def test_get_servers(self):
# Simple check that listing servers works.
servers = self.api.get_servers()
for server in servers:
LOG.debug("server: %s" % server)
def test_create_server_with_error(self):
# Create a server which will enter error state.
fake_network.set_stub_network_methods(self.stubs)
def throw_error(*_):
raise Exception()
self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ERROR', found_server['status'])
self._delete_server(created_server_id)
def test_create_and_delete_server(self):
# Creates and deletes a server.
fake_network.set_stub_network_methods(self.stubs)
# Create server
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
post = {'server': server}
# Without an imageRef, this throws 500.
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# With an invalid imageRef, this throws 500.
server[self._image_ref_parameter] = self.get_invalid_image()
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Add a valid imageRef
server[self._image_ref_parameter] = good_server.get(
self._image_ref_parameter)
# Without flavorRef, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
server[self._flavor_ref_parameter] = good_server.get(
self._flavor_ref_parameter)
# Without a name, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Set a valid server name
server['name'] = good_server['name']
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertTrue(created_server_id in server_ids)
found_server = self._wait_for_state_change(found_server, 'BUILD')
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
self.assertEqual('ACTIVE', found_server['status'])
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertTrue("image" in server)
self.assertTrue("flavor" in server)
self._delete_server(created_server_id)
def _force_reclaim(self):
# Make sure that the compute manager thinks the instance is
# old enough to be expired: the time override pushes "now" one hour ahead
the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
timeutils.set_time_override(override_time=the_past)
ctxt = context.get_admin_context()
self.compute._reclaim_queued_deletes(ctxt)
def test_deferred_delete(self):
# Creates, deletes and waits for server to be reclaimed.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Cannot restore unless instance is deleted
self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, created_server_id,
{'restore': {}})
# Cannot forceDelete unless instance is deleted
self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, created_server_id,
{'forceDelete': {}})
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
self._force_reclaim()
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def test_deferred_delete_restore(self):
# Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
# Restore server
self.api.post_server_action(created_server_id, {'restore': {}})
# Wait for server to become active again
found_server = self._wait_for_state_change(found_server, 'DELETED')
self.assertEqual('ACTIVE', found_server['status'])
def test_deferred_delete_force(self):
# Creates, deletes and force deletes a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
# Force delete server
self.api.post_server_action(created_server_id,
{self._force_delete_parameter: {}})
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def _wait_for_deletion(self, server_id):
# Wait (briefly) for deletion
for _retries in range(50):
try:
found_server = self.api.get_server(server_id)
except client.OpenStackApiNotFoundException:
found_server = None
LOG.debug("Got 404, proceeding")
break
LOG.debug("Found_server=%s" % found_server)
# TODO(justinsb): Mock doesn't yet do accurate state changes
#if found_server['status'] != 'deleting':
# break
time.sleep(.1)
# Should be gone
self.assertFalse(found_server)
def _delete_server(self, server_id):
# Delete the server
self.api.delete_server(server_id)
self._wait_for_deletion(server_id)
def test_create_server_with_metadata(self):
# Creates a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# Build the server data gradually, checking errors along the way
server = self._build_minimal_create_server_request()
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server['metadata'] = metadata
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers details list
servers = self.api.get_servers(detail=True)
server_map = dict((server['id'], server) for server in servers)
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Details do include metadata
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers summary list
servers = self.api.get_servers(detail=False)
server_map = dict((server['id'], server) for server in servers)
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Summary should not include metadata
self.assertFalse(found_server.get('metadata'))
# Cleanup
self._delete_server(created_server_id)
def test_create_and_rebuild_server(self):
# Rebuild a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
server_post = {'server': server}
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server_post['server']['metadata'] = metadata
created_server = self.api.post_server(server_post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata and other server attributes
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"name": "blah",
self._access_ipv4_parameter: "172.19.0.2",
self._access_ipv6_parameter: "fe80::2",
"metadata": {'some': 'thing'},
}
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self.assertEqual('172.19.0.2',
found_server[self._access_ipv4_parameter])
self.assertEqual('fe80::2', found_server[self._access_ipv6_parameter])
# rebuild the server with empty metadata and nothing else
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"metadata": {},
}
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self.assertEqual('172.19.0.2',
found_server[self._access_ipv4_parameter])
self.assertEqual('fe80::2', found_server[self._access_ipv6_parameter])
# Cleanup
self._delete_server(created_server_id)
def test_rename_server(self):
# Test building and renaming a server.
fake_network.set_stub_network_methods(self.stubs)
# Create a server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
server_id = created_server['id']
self.assertTrue(server_id)
# Rename the server to 'new-name'
self.api.put_server(server_id, {'server': {'name': 'new-name'}})
# Check the name of the server
created_server = self.api.get_server(server_id)
self.assertEqual(created_server['name'], 'new-name')
# Cleanup
self._delete_server(server_id)
def test_create_multiple_servers(self):
# Creates multiple servers and checks for reservation_id.
# Create 2 servers, setting 'return_reservation_id', which should
# return a reservation_id
server = self._build_minimal_create_server_request()
server[self._min_count_parameter] = 2
server[self._return_resv_id_parameter] = True
post = {'server': server}
response = self.api.post_server(post)
self.assertIn('reservation_id', response)
reservation_id = response['reservation_id']
self.assertNotIn(reservation_id, ['', None])
# Create 1 more server, which should not return a reservation_id
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# lookup servers created by the first request.
servers = self.api.get_servers(detail=True,
search_opts={'reservation_id': reservation_id})
server_map = dict((server['id'], server) for server in servers)
found_server = server_map.get(created_server_id)
# The server from the 2nd request should not be there.
self.assertEqual(found_server, None)
# Should have found 2 servers.
self.assertEqual(len(server_map), 2)
# Cleanup
self._delete_server(created_server_id)
for server_id in server_map.iterkeys():
self._delete_server(server_id)
def test_create_server_with_injected_files(self):
# Creates a server with injected_files.
fake_network.set_stub_network_methods(self.stubs)
personality = []
# Inject a text file
data = 'Hello, World!'
personality.append({
'path': '/helloworld.txt',
'contents': data.encode('base64'),
})
# Inject a binary file
data = zlib.compress('Hello, World!')
personality.append({
'path': '/helloworld.zip',
'contents': data.encode('base64'),
})
# Create server
server = self._build_minimal_create_server_request()
server['personality'] = personality
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
# Cleanup
self._delete_server(created_server_id)
class ServersTestV3(client.TestOpenStackClientV3Mixin, ServersTest):
_force_delete_parameter = 'force_delete'
_api_version = 'v3'
_image_ref_parameter = 'image_ref'
_flavor_ref_parameter = 'flavor_ref'
_access_ipv4_parameter = 'access_ip_v4'
_access_ipv6_parameter = 'access_ip_v6'
_return_resv_id_parameter = 'os-multiple-create:return_reservation_id'
_min_count_parameter = 'os-multiple-create:min_count'
| apache-2.0 |
havard024/prego | sales/api/tests.py | 3 | 17262 | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
#-*- coding: utf-8 -*-
import json
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from treeio.core.models import User, Group, Perspective, ModuleSetting, Object
from treeio.identities.models import Contact, ContactType
from treeio.sales.models import SaleOrder, Product, OrderedProduct, Subscription, \
SaleStatus, SaleSource, Lead, Opportunity
from treeio.finance.models import Currency
class SalesAPITest(TestCase):
"Sales functional tests for views"
username = "api_test"
password = "api_password"
prepared = False
authentication_headers = {"CONTENT_TYPE": "application/json",
"HTTP_AUTHORIZATION": "Basic YXBpX3Rlc3Q6YXBpX3Bhc3N3b3Jk"}
content_type = 'application/json'
def setUp(self):
"Initial Setup"
if not self.prepared:
# Clean up first
Object.objects.all().delete()
User.objects.all().delete()
# Create objects
try:
self.group = Group.objects.get(name='test')
except Group.DoesNotExist:
Group.objects.all().delete()
self.group = Group(name='test')
self.group.save()
try:
self.user = DjangoUser.objects.get(username=self.username)
self.user.set_password(self.password)
try:
self.profile = self.user.get_profile()
except Exception:
User.objects.all().delete()
self.user = DjangoUser(username=self.username, password='')
self.user.set_password(self.password)
self.user.save()
except DjangoUser.DoesNotExist:
User.objects.all().delete()
self.user = DjangoUser(username=self.username, password='')
self.user.set_password(self.password)
self.user.save()
try:
perspective = Perspective.objects.get(name='default')
except Perspective.DoesNotExist:
Perspective.objects.all().delete()
perspective = Perspective(name='default')
perspective.set_default_user()
perspective.save()
ModuleSetting.set('default_perspective', perspective.id)
self.contact_type = ContactType()
self.contact_type.slug = 'machine'
self.contact_type.name = 'machine'
self.contact_type.save()
self.contact = Contact()
self.contact.contact_type = self.contact_type
self.contact.set_default_user()
self.contact.save()
self.assertNotEquals(self.contact.id, None)
self.status = SaleStatus()
self.status.active = True
self.status.use_sales = True
self.status.use_leads = True
self.status.use_opportunities = True
self.status.set_default_user()
self.status.save()
self.assertNotEquals(self.status.id, None)
self.currency = Currency(code="GBP",
name="Pounds",
symbol="L",
is_default=True)
self.currency.save()
self.source = SaleSource()
self.source.active = True
self.source.save()
self.source.set_user(self.user)
self.assertNotEquals(self.source.id, None)
self.product = Product(name="Test")
self.product.product_type = 'service'
self.product.active = True
self.product.sell_price = 10
self.product.buy_price = 100
self.product.set_default_user()
self.product.save()
self.assertNotEquals(self.product.id, None)
self.subscription = Subscription()
self.subscription.client = self.contact
self.subscription.set_default_user()
self.subscription.save()
self.assertNotEquals(self.subscription.id, None)
self.lead = Lead()
self.lead.contact_method = 'email'
self.lead.status = self.status
self.lead.contact = self.contact
self.lead.set_default_user()
self.lead.save()
self.assertNotEquals(self.lead.id, None)
self.opportunity = Opportunity()
self.opportunity.lead = self.lead
self.opportunity.contact = self.contact
self.opportunity.status = self.status
self.opportunity.amount = 100
self.opportunity.amount_currency = self.currency
self.opportunity.amount_display = 120
self.opportunity.set_default_user()
self.opportunity.save()
self.assertNotEquals(self.opportunity.id, None)
self.order = SaleOrder(reference="Test")
self.order.opportunity = self.opportunity
self.order.status = self.status
self.order.source = self.source
self.order.currency = self.currency
self.order.total = 0
self.order.total_display = 0
self.order.set_default_user()
self.order.save()
self.assertNotEquals(self.order.id, None)
self.ordered_product = OrderedProduct()
self.ordered_product.product = self.product
self.ordered_product.order = self.order
self.ordered_product.rate = 0
self.ordered_product.subscription = self.subscription
self.ordered_product.set_default_user()
self.ordered_product.save()
self.assertNotEquals(self.ordered_product.id, None)
self.client = Client()
self.prepared = True
def test_unauthenticated_access(self):
"Test index page at /sales/statuses"
response = self.client.get('/api/sales/statuses')
# Redirects as unauthenticated
self.assertEquals(response.status_code, 401)
def test_get_statuses_list(self):
""" Test index page api/sales/status """
response = self.client.get(
path=reverse('api_sales_status'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_status(self):
response = self.client.get(path=reverse('api_sales_status', kwargs={
'object_ptr': self.status.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_status(self):
updates = {"name": "Close_API", "active": True, "details": "api test details",
"use_leads": True, "use_opportunities": True, "hidden": False}
response = self.client.put(path=reverse('api_sales_status', kwargs={'object_ptr': self.status.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['active'], updates['active'])
self.assertEquals(data['details'], updates['details'])
self.assertEquals(data['use_leads'], updates['use_leads'])
self.assertEquals(
data['use_opportunities'], updates['use_opportunities'])
self.assertEquals(data['hidden'], updates['hidden'])
def test_get_products_list(self):
""" Test index page api/sales/products """
response = self.client.get(
path=reverse('api_sales_products'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_product(self):
response = self.client.get(path=reverse('api_sales_products', kwargs={
'object_ptr': self.product.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_product(self):
updates = {"name": "API product", "parent": None, "product_type": "service", "code": "api_test_code",
"buy_price": '100.05', "sell_price": '10.5', "active": True, "runout_action": "ignore", "details": "api details"}
response = self.client.put(path=reverse('api_sales_products', kwargs={'object_ptr': self.product.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['product_type'], updates['product_type'])
self.assertEquals(data['code'], updates['code'])
self.assertEquals(data['buy_price'], updates['buy_price'])
self.assertEquals(data['sell_price'], updates['sell_price'])
self.assertEquals(data['active'], updates['active'])
self.assertEquals(data['runout_action'], updates['runout_action'])
self.assertEquals(data['details'], updates['details'])
def test_get_sources_list(self):
""" Test index page api/sales/sources """
response = self.client.get(
path=reverse('api_sales_sources'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_source(self):
response = self.client.get(path=reverse('api_sales_sources', kwargs={
'object_ptr': self.source.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_source(self):
updates = {
"name": "Api source", "active": True, "details": "api details"}
response = self.client.put(path=reverse('api_sales_sources', kwargs={'object_ptr': self.source.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['active'], updates['active'])
self.assertEquals(data['details'], updates['details'])
#
def test_get_leads_list(self):
""" Test index page api/sales/leads """
response = self.client.get(
path=reverse('api_sales_leads'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_lead(self):
response = self.client.get(path=reverse(
'api_sales_leads', kwargs={'object_ptr': self.lead.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_lead(self):
updates = {"status": self.status.id, "contact_method": "email", "contact": self.contact.id,
"products_interested": [self.product.id], "source": self.source.id, 'details': 'Api details'}
response = self.client.put(path=reverse('api_sales_leads', kwargs={'object_ptr': self.lead.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['status']['id'], updates['status'])
self.assertEquals(data['contact_method'], updates['contact_method'])
self.assertEquals(data['contact']['id'], updates['contact'])
for i, product in enumerate(data['products_interested']):
self.assertEquals(product['id'], updates['products_interested'][i])
self.assertEquals(data['source']['id'], updates['source'])
self.assertEquals(data['details'], updates['details'])
def test_get_opportunities_list(self):
""" Test index page api/sales/opportunities """
response = self.client.get(
path=reverse('api_sales_opportunities'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_opportunity(self):
response = self.client.get(path=reverse('api_sales_opportunities', kwargs={
'object_ptr': self.opportunity.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_opportunity(self):
updates = {"status": self.status.id, "products_interested": [self.product.id], "contact": self.contact.id,
"amount_display": 3000.56, "amount_currency": self.currency.id, "details": "API DETAILS"}
response = self.client.put(path=reverse('api_sales_opportunities', kwargs={'object_ptr': self.opportunity.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['status']['id'], updates['status'])
self.assertEquals(data['contact']['id'], updates['contact'])
for i, product in enumerate(data['products_interested']):
self.assertEquals(product['id'], updates['products_interested'][i])
self.assertEquals(
data['amount_currency']['id'], updates['amount_currency'])
self.assertEquals(data['details'], updates['details'])
def test_get_orders_list(self):
""" Test index page api/sales/orders """
response = self.client.get(
path=reverse('api_sales_orders'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_order(self):
response = self.client.get(path=reverse(
'api_sales_orders', kwargs={'object_ptr': self.order.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_order(self):
updates = {"datetime": "2011-04-11 12:01:15", "status": self.status.id,
"source": self.source.id, "details": "api details"}
response = self.client.put(path=reverse('api_sales_orders', kwargs={'object_ptr': self.order.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['status']['id'], updates['status'])
self.assertEquals(data['source']['id'], updates['source'])
self.assertEquals(data['details'], updates['details'])
def test_get_subscriptions_list(self):
""" Test index page api/sales/subscriptions"""
response = self.client.get(
path=reverse('api_sales_subscriptions'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_subscription(self):
response = self.client.get(path=reverse('api_sales_subscriptions', kwargs={
'object_ptr': self.subscription.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_subscription(self):
updates = {"product": self.product.id, "start": "2011-06-30",
"cycle_period": "daily", "active": True, "details": "api details"}
response = self.client.put(path=reverse('api_sales_subscriptions', kwargs={'object_ptr': self.subscription.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['product']['id'], updates['product'])
self.assertEquals(data['cycle_period'], updates['cycle_period'])
self.assertEquals(data['active'], updates['active'])
self.assertEquals(data['details'], updates['details'])
def test_get_ordered_product(self):
response = self.client.get(path=reverse('api_sales_ordered_products', kwargs={
'object_ptr': self.ordered_product.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_ordered_product(self):
updates = {
"discount": '10.0', "product": self.product.id, "quantity": '10'}
response = self.client.put(path=reverse('api_sales_ordered_products', kwargs={'object_ptr': self.ordered_product.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['product']['id'], updates['product'])
self.assertEquals(data['discount'], updates['discount'])
self.assertEquals(data['quantity'], updates['quantity'])
| mit |
iwoca/django-deep-collector | tests/test_serializer.py | 1 | 1514 | from copy import copy
import json
from django.test import TestCase
from .factories import ChildModelFactory
from deep_collector.compat.serializers import MultiModelInheritanceSerializer
class TestMultiModelInheritanceSerializer(TestCase):
def test_that_parent_model_fields_are_in_serializated_object_if_parent_is_not_abstract(self):
child_model = ChildModelFactory.create()
serializer = MultiModelInheritanceSerializer()
json_objects = serializer.serialize([child_model])
child_model_dict = json.loads(json_objects)[0]
serialized_fields = child_model_dict['fields'].keys()
self.assertIn('child_field', serialized_fields)
self.assertIn('o2o', serialized_fields)
self.assertIn('fkey', serialized_fields)
def test_that_we_dont_alter_model_class_meta_after_serialization(self):
child_model = ChildModelFactory.create()
local_fields_before = copy(child_model._meta.concrete_model._meta.local_fields)
local_m2m_fields_before = copy(child_model._meta.concrete_model._meta.local_many_to_many)
serializer = MultiModelInheritanceSerializer()
serializer.serialize([child_model])
local_fields_after = copy(child_model._meta.concrete_model._meta.local_fields)
local_m2m_fields_after = copy(child_model._meta.concrete_model._meta.local_many_to_many)
self.assertEqual(local_fields_before, local_fields_after)
self.assertEqual(local_m2m_fields_before, local_m2m_fields_after)
| bsd-3-clause |
garyjyao1/ansible | lib/ansible/plugins/connection/docker.py | 15 | 8622 | # Based on the chroot connection plugin by Maykel Moya
#
# Connection plugin for configuring docker containers
# (c) 2014, Lorin Hochstein
# (c) 2015, Leendert Brouwer
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# Maintainer: Leendert Brouwer (https://github.com/objectified)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import os.path
import pipes
import subprocess
import re
from distutils.version import LooseVersion
import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
BUFSIZE = 65536
class Connection(ConnectionBase):
''' Local docker based connections '''
transport = 'docker'
has_pipelining = True
# su currently has an undiagnosed issue with calculating the file
# checksums (so copy, for instance, doesn't work right)
# Have to look into that before re-enabling this
become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
# Note: docker supports running as non-root in some configurations.
# (For instance, setting the UNIX socket file to be readable and
# writable by a specific UNIX group and then putting users into that
# group). Therefore we don't check that the user is root when using
# this connection. But if the user is getting a permission denied
# error it probably means that docker on their system is only
# configured to be connected to by root and they are not running as
# root.
if 'docker_command' in kwargs:
self.docker_cmd = kwargs['docker_command']
else:
self.docker_cmd = distutils.spawn.find_executable('docker')
if not self.docker_cmd:
raise AnsibleError("docker command not found in PATH")
self.can_copy_bothways = False
docker_version = self._get_docker_version()
if LooseVersion(docker_version) < LooseVersion('1.3'):
raise AnsibleError('docker connection type requires docker 1.3 or higher')
if LooseVersion(docker_version) >= LooseVersion('1.8.0'):
self.can_copy_bothways = True
@staticmethod
def _sanitize_version(version):
return re.sub('[^0-9a-zA-Z\.]', '', version)
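# Illustrative behaviour (hypothetical input): quote characters left
# over from the --format query are stripped, e.g. "'1.9.1'" -> "1.9.1".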
def _get_docker_version(self):
cmd = [self.docker_cmd, 'version']
cmd_output = subprocess.check_output(cmd)
for line in cmd_output.split('\n'):
if line.startswith('Server version:'): # old docker versions
return self._sanitize_version(line.split()[2])
# no result yet, must be newer Docker version
new_docker_cmd = [
self.docker_cmd,
'version', '--format', "'{{.Server.Version}}'"
]
cmd_output = subprocess.check_output(new_docker_cmd)
return self._sanitize_version(cmd_output)
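# For reference, a sketch of the two output shapes assumed above: older
# docker clients print a "Server version: 1.2.0" line, while newer ones
# answer the --format query with a quoted version string.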
def _connect(self, port=None):
""" Connect to the container. Nothing to do """
super(Connection, self)._connect()
if not self._connected:
self._display.vvv("ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
self._play_context.remote_user, host=self._play_context.remote_addr)
)
self._connected = True
def exec_command(self, cmd, in_data=None, sudoable=False):
""" Run a command on the docker host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
# -i is needed to keep stdin open which allows pipelining to work
local_cmd = [self.docker_cmd, "exec", '-i', self._play_context.remote_addr, executable, '-c', cmd]
self._display.vvv("EXEC %s" % (local_cmd), host=self._play_context.remote_addr)
p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
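# Illustrative behaviour of the path prefixing above (hypothetical values):
#     'tmp/foo.txt'  -> '/tmp/foo.txt'
#     '/tmp/foo.txt' -> '/tmp/foo.txt'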
def put_file(self, in_path, out_path):
""" Transfer a file from local to docker container """
super(Connection, self).put_file(in_path, out_path)
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
out_path = self._prefix_login_path(out_path)
if not os.path.exists(in_path):
raise AnsibleFileNotFound(
"file or module does not exist: %s" % in_path)
if self.can_copy_bothways:
# only docker >= 1.8.1 can do this natively
args = [ self.docker_cmd, "cp", in_path, "%s:%s" % (self._play_context.remote_addr, out_path) ]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
else:
out_path = pipes.quote(out_path)
# Older docker doesn't have native support for copying files into
# running containers, so we use docker exec to implement this
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
args = [self.docker_cmd, "exec", "-i", self._play_context.remote_addr, executable, "-c",
"dd of={0} bs={1}".format(out_path, BUFSIZE)]
with open(in_path, 'rb') as in_file:
try:
p = subprocess.Popen(args, stdin=in_file,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
raise AnsibleError("docker connection with docker < 1.8.1 requires dd command in the chroot")
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
super(Connection, self).fetch_file(in_path, out_path)
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
in_path = self._prefix_login_path(in_path)
# out_path is the final file path, but docker takes a directory, not a
# file path
out_dir = os.path.dirname(out_path)
args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
# Rename if needed
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
if actual_out_path != out_path:
os.rename(actual_out_path, out_path)
def close(self):
""" Terminate the connection. Nothing to do for Docker"""
super(Connection, self).close()
self._connected = False
| gpl-3.0 |
Achuth17/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
c0defreak/python-for-android | python3-alpha/extra_modules/gdata/geo/__init__.py | 45 | 6005 | # -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.geo, implementing geological positioning in gdata structures
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Picasa Web Albums uses the georss and gml namespaces for
elements defined in the GeoRSS and Geography Markup Language specifications.
Specifically, Picasa Web Albums uses the following elements:
georss:where
gml:Point
gml:pos
http://code.google.com/apis/picasaweb/reference.html#georss_reference
Picasa Web Albums also accepts geographic-location data in two other formats:
W3C format and plain-GeoRSS (without GML) format.
"""
#
#Over the wire, the Picasa Web Albums only accepts and sends the
#elements mentioned above, but this module will let you seamlessly convert
#between the different formats (TODO 2007-10-18 hg)
__author__ = '[email protected]'# (Håvard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
GML_NAMESPACE = 'http://www.opengis.net/gml'
GEORSS_NAMESPACE = 'http://www.georss.org/georss'
class GeoBaseElement(atom.AtomBase):
"""Base class for elements.
To add new elements, you only need to add the element tag name to self._tag
and the namespace to self._namespace
"""
_tag = ''
_namespace = GML_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, name=None, extension_elements=None,
extension_attributes=None, text=None):
self.name = name
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Pos(GeoBaseElement):
"""(string) Specifies a latitude and longitude, separated by a space,
e.g. `35.669998 139.770004'"""
_tag = 'pos'
def PosFromString(xml_string):
return atom.CreateClassFromXMLString(Pos, xml_string)
class Point(GeoBaseElement):
"""(container) Specifies a particular geographical point, by means of
a <gml:pos> element."""
_tag = 'Point'
_children = atom.AtomBase._children.copy()
_children['{%s}pos' % GML_NAMESPACE] = ('pos', Pos)
def __init__(self, pos=None, extension_elements=None, extension_attributes=None, text=None):
GeoBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
if pos is None:
pos = Pos()
self.pos=pos
def PointFromString(xml_string):
return atom.CreateClassFromXMLString(Point, xml_string)
class Where(GeoBaseElement):
"""(container) Specifies a geographical location or region.
A container element, containing a single <gml:Point> element.
(Not to be confused with <gd:where>.)
Note that the (only) child attribute, .Point, is title-cased.
This reflects the names of elements in the xml stream
(principle of least surprise).
As a convenience, you can get a tuple of (lat, lon) with Where.location(),
and set the same data with Where.setLocation( (lat, lon) ).
Similarly, there are methods to set and get only latitude and longitude.
"""
_tag = 'where'
_namespace = GEORSS_NAMESPACE
_children = atom.AtomBase._children.copy()
_children['{%s}Point' % GML_NAMESPACE] = ('Point', Point)
def __init__(self, point=None, extension_elements=None, extension_attributes=None, text=None):
GeoBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
if point is None:
point = Point()
self.Point=point
def location(self):
"(float, float) Return Where.Point.pos.text as a (lat,lon) tuple"
try:
return tuple([float(z) for z in self.Point.pos.text.split(' ')])
except AttributeError:
return tuple()
def set_location(self, latlon):
"""(bool) Set Where.Point.pos.text from a (lat,lon) tuple.
Arguments:
lat (float): The latitude in degrees, from -90.0 to 90.0
lon (float): The longitude in degrees, from -180.0 to 180.0
Returns True on success.
"""
assert(isinstance(latlon[0], float))
assert(isinstance(latlon[1], float))
try:
self.Point.pos.text = "%s %s" % (latlon[0], latlon[1])
return True
except AttributeError:
return False
def latitude(self):
"(float) Get the latitude value of the geo-tag. See also .location()"
lat, lon = self.location()
return lat
def longitude(self):
"(float) Get the longtitude value of the geo-tag. See also .location()"
lat, lon = self.location()
return lon
longtitude = longitude
def set_latitude(self, lat):
"""(bool) Set the latitude value of the geo-tag.
Args:
lat (float): The new latitude value
See also .set_location()
"""
_lat, lon = self.location()
return self.set_location((lat, lon))
def set_longitude(self, lon):
"""(bool) Set the longtitude value of the geo-tag.
Args:
lat (float): The new latitude value
See also .set_location()
"""
lat, _lon = self.location()
return self.set_location(lat, lon)
set_longtitude = set_longitude
def WhereFromString(xml_string):
return atom.CreateClassFromXMLString(Where, xml_string)
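# Illustrative usage (a sketch, not part of the original module):
#
#     where = Where()
#     where.set_location((35.669998, 139.770004))
#     where.location()    # -> (35.669998, 139.770004)
#     where.latitude()    # -> 35.669998
#     where.longitude()   # -> 139.770004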
| apache-2.0 |
ycaihua/kbengine | kbe/res/scripts/common/Lib/encodings/rot_13.py | 155 | 2428 | #!/usr/bin/env python
""" Python Character Mapping Codec for ROT13.
This codec de/encodes from str to str.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return (input.translate(rot13_map), len(input))
def decode(self, input, errors='strict'):
return (input.translate(rot13_map), len(input))
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return input.translate(rot13_map)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return input.translate(rot13_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='rot-13',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
_is_text_encoding=False,
)
### Map
rot13_map = codecs.make_identity_dict(range(256))
rot13_map.update({
0x0041: 0x004e,
0x0042: 0x004f,
0x0043: 0x0050,
0x0044: 0x0051,
0x0045: 0x0052,
0x0046: 0x0053,
0x0047: 0x0054,
0x0048: 0x0055,
0x0049: 0x0056,
0x004a: 0x0057,
0x004b: 0x0058,
0x004c: 0x0059,
0x004d: 0x005a,
0x004e: 0x0041,
0x004f: 0x0042,
0x0050: 0x0043,
0x0051: 0x0044,
0x0052: 0x0045,
0x0053: 0x0046,
0x0054: 0x0047,
0x0055: 0x0048,
0x0056: 0x0049,
0x0057: 0x004a,
0x0058: 0x004b,
0x0059: 0x004c,
0x005a: 0x004d,
0x0061: 0x006e,
0x0062: 0x006f,
0x0063: 0x0070,
0x0064: 0x0071,
0x0065: 0x0072,
0x0066: 0x0073,
0x0067: 0x0074,
0x0068: 0x0075,
0x0069: 0x0076,
0x006a: 0x0077,
0x006b: 0x0078,
0x006c: 0x0079,
0x006d: 0x007a,
0x006e: 0x0061,
0x006f: 0x0062,
0x0070: 0x0063,
0x0071: 0x0064,
0x0072: 0x0065,
0x0073: 0x0066,
0x0074: 0x0067,
0x0075: 0x0068,
0x0076: 0x0069,
0x0077: 0x006a,
0x0078: 0x006b,
0x0079: 0x006c,
0x007a: 0x006d,
})
### Filter API
def rot13(infile, outfile):
outfile.write(codecs.encode(infile.read(), 'rot-13'))
if __name__ == '__main__':
import sys
rot13(sys.stdin, sys.stdout)
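# Example (sketch): once this codec is registered on the codec search
# path, text can be translated directly, e.g.
#     codecs.encode('Hello', 'rot-13') == 'Uryyb'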
| lgpl-3.0 |
woutersmet/Molmodsummer | lib/molmod/io/dlpoly.py | 1 | 9363 | # MolMod is a collection of molecular modelling tools for python.
# Copyright (C) 2007 - 2008 Toon Verstraelen <[email protected]>
#
# This file is part of MolMod.
#
# MolMod is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# MolMod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from molmod.units import ps, amu, A, atm, deg
from molmod.io.common import slice_match
import numpy
__all__ = ["Error", "HistoryReader", "OutputReader"]
class Error(Exception):
pass
class HistoryReader(object):
def __init__(self, filename, sub=slice(None), pos_unit=A, vel_unit=A/ps, frc_unit=amu*A/ps**2, time_unit=ps, mass_unit=amu):
self._f = file(filename)
self._sub = sub
self.pos_unit = pos_unit
self.vel_unit = vel_unit
self.frc_unit = frc_unit
self.time_unit = time_unit
self.mass_unit = mass_unit
try:
self.header = self._f.next()[:-1]
integers = tuple(int(word) for word in self._f.next().split())
if len(integers) != 3:
raise Error("Second line must contain three integers.")
self.keytrj, self.imcon, self.num_atoms = integers
except StopIteration:
raise Error("File is too short. Could not read header.")
except ValueError:
raise Error("Second line must contain three integers.")
self._counter = 1
self._frame_size = 4 + self.num_atoms*(self.keytrj+2)
def __del__(self):
self._f.close()
def __iter__(self):
return self
def next(self):
# auxiliary read function
def read_three(msg):
# read three words as floating point numbers
line = self._f.next()
try:
return [float(line[:12]), float(line[12:24]), float(line[24:])]
except ValueError:
raise Error(msg)
# skip frames as requested
while not slice_match(self._sub, self._counter):
for i in xrange(self._frame_size):
self._f.next()
self._counter += 1
frame = {}
# read the frame header line
words = self._f.next().split()
if len(words) != 6:
raise Error("The first line of each time frame must contain 6 words. (%i'th frame)" % self._counter)
if words[0] != "timestep":
raise Error("The first word of the first line of each time frame must be 'timestep'. (%i'th frame)" % self._counter)
try:
step = int(words[1])
frame["step"] = step
if int(words[2]) != self.num_atoms:
raise Error("The number of atoms has changed. (%i'th frame, %i'th step)" % (self._counter, step))
if int(words[3]) != self.keytrj:
raise Error("keytrj has changed. (%i'th frame, %i'th step)" % (self._counter, step))
if int(words[4]) != self.imcon:
raise Error("imcon has changed. (%i'th frame, %i'th step)" % (self._counter, step))
frame["timestep"] = float(words[5])*self.time_unit
frame["time"] = frame["timestep"]*step # this is ugly, or wait ... dlpoly is a bit ugly. we are not to blame!
except ValueError:
raise Error("Could not convert all numbers on the first line of the current time frame. (%i'th frame)" % self._counter)
# the three cell lines
cell = numpy.zeros((3,3), float)
frame["cell"] = cell
cell_msg = "The cell lines must consist of three floating point values. (%i'th frame, %i'th step)" % (self._counter, step)
for i in xrange(3):
cell[:,i] = read_three(cell_msg)
cell *= self.pos_unit
# the atoms
symbols = []
frame["symbols"] = symbols
masses = numpy.zeros(self.num_atoms, float)
frame["masses"] = masses
charges = numpy.zeros(self.num_atoms, float)
frame["charges"] = charges
pos = numpy.zeros((self.num_atoms,3), float)
frame["pos"] = pos
if self.keytrj > 0:
vel = numpy.zeros((self.num_atoms,3), float)
frame["vel"] = vel
if self.keytrj > 1:
frc = numpy.zeros((self.num_atoms,3), float)
frame["frc"] = frc
for i in xrange(self.num_atoms):
# the atom header line
words = self._f.next().split()
if len(words) != 4:
raise Error("The atom header line must contain 4 words. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1))
symbols.append(words[0])
try:
masses[i] = float(words[2])*self.mass_unit
charges[i] = float(words[3])
except ValueError:
raise Error("The numbers in the atom header line could not be interpreted.")
# the pos line
pos_msg = "The position lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
pos[i] = read_three(pos_msg)
if self.keytrj > 0:
vel_msg = "The velocity lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
vel[i] = read_three(vel_msg)
if self.keytrj > 1:
frc_msg = "The force lines must consist of three floating point values. (%i'th frame, %i'th step, %i'th atom)" % (self._counter, step, i+1)
frc[i] = read_three(frc_msg)
pos *= self.pos_unit # convert to au
if self.keytrj > 0:
vel *= self.vel_unit # convert to au
if self.keytrj > 1:
frc *= self.frc_unit # convert to au
# done
self._counter += 1
return frame
class OutputReader(object):
_marker = " " + "-"*130
def __init__(self, filename, sub=slice(None), skip_equi_period=True, pos_unit=A, time_unit=ps, angle_unit=deg, e_unit=amu/(A/ps)**2):
self._f = file(filename)
self._sub = sub
self.skip_equi_period = skip_equi_period
self._counter = 1
self._conv = [
1, e_unit, 1, e_unit, e_unit, e_unit, e_unit, e_unit, e_unit, e_unit,
time_unit, e_unit, 1, e_unit, e_unit, e_unit, e_unit, e_unit, e_unit, e_unit,
1, pos_unit**3, 1, e_unit, e_unit, angle_unit, angle_unit, angle_unit, e_unit, 1000*atm,
]
self.last_step = None
# find the line that gives the number of equilibration steps:
try:
while True:
line = self._f.next()
if line.startswith(" equilibration period"):
self.equi_period = int(line[30:])
break
except StopIteration:
raise Error("DL_POLY OUTPUT file is too short. Could not find line with the number of equilibration steps.")
except ValueError:
raise Error("Could not read the number of equilibration steps. (expecting an integer)")
def __del__(self):
self._f.close()
def __iter__(self):
return self
def next(self):
def goto_next_frame():
marked = False
while True:
line = self._f.next()[:-1]
if marked and len(line) > 0 and not line.startswith(" --------"):
try:
step = int(line[:10])
return step, line
except ValueError:
pass
marked = (len(line) == 131 and line == self._marker)
while True:
step, line = goto_next_frame()
if (not self.skip_equi_period or step >= self.equi_period) and \
step != self.last_step:
break
# skip frames as requested
while not slice_match(self._sub, self._counter):
step, line = goto_next_frame()
self._counter += 1
# now really read these three lines
try:
row = [step]
for i in xrange(9):
row.append(float(line[10+i*12:10+(i+1)*12]))
line = self._f.next()[:-1]
row.append(float(line[:10]))
for i in xrange(9):
row.append(float(line[10+i*12:10+(i+1)*12]))
line = self._f.next()[:-1]
row.append(float(line[:10]))
for i in xrange(9):
row.append(float(line[10+i*12:10+(i+1)*12]))
except ValueError:
raise Error("Some numbers in the output file could not be read. (expecting floating point numbers)")
# convert all the numbers to atomic units
for i in xrange(30):
row[i] *= self._conv[i]
# done
self.last_step = step
return row
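# Illustrative usage sketch, not part of the original module: iterating an
# OutputReader yields 30-element rows (the step number followed by 29
# summary values, all converted to atomic units via self._conv):
#
#     out = OutputReader("OUTPUT", skip_equi_period=True)
#     steps = [row[0] for row in out]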
| gpl-3.0 |
Empeeric/dirometer | django/views/csrf.py | 289 | 3834 | from django.http import HttpResponseForbidden
from django.template import Context, Template
from django.conf import settings
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Forbidden <span>(403)</span></h1>
<p>CSRF verification failed. Request aborted.</p>
{% if no_referer %}
<p>You are seeing this message because this HTTPS site requires a 'Referer
header' to be sent by your Web browser, but none was sent. This header is
required for security reasons, to ensure that your browser is not being
hijacked by third parties.</p>
<p>If you have configured your browser to disable 'Referer' headers, please
re-enable them, at least for this site, or for HTTPS connections, or for
'same-origin' requests.</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href='http://docs.djangoproject.com/en/dev/ref/contrib/csrf/#ref-contrib-csrf'>Django's
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>The view function uses <a
href='http://docs.djangoproject.com/en/dev/ref/templates/api/#subclassing-context-requestcontext'><code>RequestContext</code></a>
for the template, instead of <code>Context</code>.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
</ul>
<p>You're seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>More information is available with DEBUG=True.</small></p>
</div>
{% endif %}
</body>
</html>
"""
def csrf_failure(request, reason=""):
"""
Default view used when request fails CSRF protection
"""
from django.middleware.csrf import REASON_NO_REFERER
    t = Template(CSRF_FAILURE_TEMPLATE)
c = Context({'DEBUG': settings.DEBUG,
'reason': reason,
'no_referer': reason == REASON_NO_REFERER
})
return HttpResponseForbidden(t.render(c), mimetype='text/html')
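# Illustrative sketch, not part of the original module: a project can point
# the CSRF_FAILURE_VIEW setting at a custom view with the same signature as
# csrf_failure above; the 'myproject' path below is only a hypothetical
# example:
#
#     CSRF_FAILURE_VIEW = 'myproject.views.csrf_failure'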
| mit |
lucalianas/openmicroscopy | components/tools/OmeroPy/src/omero/gateway/scripts/testdb_create.py | 9 | 9835 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Library for gateway tests
Copyright 2009 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import omero
from omero.rtypes import rstring
from omero.gateway.scripts import dbhelpers
dbhelpers.USERS = {
'user': dbhelpers.UserEntry(
'weblitz_test_user', 'foobar', 'User', 'Weblitz'),
'author': dbhelpers.UserEntry(
'weblitz_test_author', 'foobar', 'Author', 'Weblitz'),
}
dbhelpers.PROJECTS = {
'testpr1': dbhelpers.ProjectEntry('weblitz_test_priv_project', 'author'),
'testpr2': dbhelpers.ProjectEntry('weblitz_test_priv_project2', 'author'),
}
dbhelpers.DATASETS = {
'testds1': dbhelpers.DatasetEntry('weblitz_test_priv_dataset', 'testpr1'),
'testds2': dbhelpers.DatasetEntry('weblitz_test_priv_dataset2', 'testpr1'),
'testds3': dbhelpers.DatasetEntry('weblitz_test_priv_dataset3', 'testpr2'),
}
dbhelpers.IMAGES = {
'testimg1': dbhelpers.ImageEntry(
'weblitz_test_priv_image', 'CHOBI_d3d.dv', 'testds1'),
'testimg2': dbhelpers.ImageEntry(
'weblitz_test_priv_image2', 'CHOBI_d3d.dv', 'testds1'),
'tinyimg': dbhelpers.ImageEntry(
'weblitz_test_priv_image_tiny', 'tinyTest.d3d.dv', 'testds1'),
'badimg': dbhelpers.ImageEntry(
'weblitz_test_priv_image_bad', False, 'testds1'),
'tinyimg2': dbhelpers.ImageEntry(
'weblitz_test_priv_image_tiny2', 'tinyTest.d3d.dv', 'testds2'),
'tinyimg3': dbhelpers.ImageEntry(
'weblitz_test_priv_image_tiny3', 'tinyTest.d3d.dv', 'testds3'),
'bigimg': dbhelpers.ImageEntry(
'weblitz_test_priv_image_big', 'big.tiff', 'testds3'),
}
class TestDBHelper(object):
def setUp(self, skipTestDB=False, skipTestImages=True):
self.tmpfiles = []
self._has_connected = False
self._last_login = None
self.doDisconnect()
self.USER = dbhelpers.USERS['user']
self.AUTHOR = dbhelpers.USERS['author']
self.ADMIN = dbhelpers.ROOT
gateway = omero.client_wrapper()
try:
rp = gateway.getProperty('omero.rootpass')
if rp:
dbhelpers.ROOT.passwd = rp
finally:
gateway.seppuku()
self.prepTestDB(onlyUsers=skipTestDB, skipImages=skipTestImages)
self.doDisconnect()
def doConnect(self):
if not self._has_connected:
self.gateway.connect()
self._has_connected = True
assert self.gateway.isConnected(), 'Can not connect'
assert self.gateway.keepAlive(
), 'Could not send keepAlive to connection'
self.gateway.setGroupForSession(
self.gateway.getEventContext().memberOfGroups[0])
def doDisconnect(self):
if self._has_connected and self.gateway:
self.doConnect()
self.gateway.seppuku()
assert not self.gateway.isConnected(), 'Can not disconnect'
self.gateway = None
self._has_connected = False
self._last_login = None
def doLogin(self, user=None, groupname=None):
        login = (user, groupname)
        if self._has_connected and self._last_login == login:
return self.doConnect()
self.doDisconnect()
if user:
self.gateway = dbhelpers.login(user, groupname)
else:
self.gateway = dbhelpers.loginAsPublic()
self.doConnect()
        self._last_login = login
def loginAsAdmin(self):
self.doLogin(self.ADMIN)
def loginAsAuthor(self):
self.doLogin(self.AUTHOR)
def loginAsUser(self):
self.doLogin(self.USER)
def loginAsPublic(self):
self.doLogin()
def tearDown(self):
try:
if self.gateway is not None:
self.gateway.seppuku()
finally:
            failure = False
            for tmpfile in self.tmpfiles:
                try:
                    tmpfile.close()
                except:
                    failure = True
                    print "Error closing: %s" % tmpfile
            if failure:
                raise Exception("Exception while closing temporary files")
def getTestProject(self):
return dbhelpers.getProject(self.gateway, 'testpr1')
def getTestProject2(self):
return dbhelpers.getProject(self.gateway, 'testpr2')
def getTestDataset(self, project=None):
return dbhelpers.getDataset(self.gateway, 'testds1', project)
def getTestDataset2(self, project=None):
return dbhelpers.getDataset(self.gateway, 'testds2', project)
def getTestImage(self, dataset=None, autocreate=False):
return dbhelpers.getImage(self.gateway, 'testimg1', forceds=dataset,
autocreate=autocreate)
def getTestImage2(self, dataset=None):
return dbhelpers.getImage(self.gateway, 'testimg2', dataset)
def getBadTestImage(self, dataset=None, autocreate=False):
return dbhelpers.getImage(self.gateway, 'badimg', forceds=dataset,
autocreate=autocreate)
def getTinyTestImage(self, dataset=None, autocreate=False):
return dbhelpers.getImage(self.gateway, 'tinyimg', forceds=dataset,
autocreate=autocreate)
def getTinyTestImage2(self, dataset=None, autocreate=False):
return dbhelpers.getImage(self.gateway, 'tinyimg2', forceds=dataset,
autocreate=autocreate)
def getTinyTestImage3(self, dataset=None, autocreate=False):
return dbhelpers.getImage(self.gateway, 'tinyimg3', forceds=dataset,
autocreate=autocreate)
def getBigTestImage(self, dataset=None, autocreate=False):
return dbhelpers.getImage(self.gateway, 'bigimg', forceds=dataset,
autocreate=autocreate)
def prepTestDB(self, onlyUsers=False, skipImages=True):
dbhelpers.bootstrap(onlyUsers=onlyUsers, skipImages=skipImages)
def waitOnCmd(self, client, handle):
callback = omero.callbacks.CmdCallbackI(client, handle)
callback.loop(10, 500) # throws on timeout
rsp = callback.getResponse()
assert isinstance(rsp, omero.cmd.OK)
return callback
def createPDTree(self, project=None, dataset=None):
"""
Create/link a Project and/or Dataset (link them if both are specified)
        Existing objects can be passed as omero.model objects or blitz
        wrapper objects. Otherwise new objects will be created with name
str(project) or str(dataset). If project OR dataset is specified, the
ProjectWrapper or DatasetWrapper is returned. If both project and
dataset are specified, they will be linked and the PD-link is returned
as a BlitzObjectWrapper.
        @param project:     omero.model.ProjectI
OR omero.gateway.ProjectWrapper
or name (string)
@param dataset: omero.model.DatasetI
OR omero.gateway.DatasetWrapper
or name (string)
"""
dsId = ds = None
prId = pr = None
returnVal = None
if dataset is not None:
try:
dsId = dataset.id
dsId = dsId.val
except:
ds = omero.model.DatasetI()
ds.name = rstring(str(dataset))
ds = self.gateway.getUpdateService().saveAndReturnObject(ds)
returnVal = omero.gateway.DatasetWrapper(self.gateway, ds)
dsId = ds.id.val
if project is not None:
try:
prId = project.id
prId = prId.val
except:
pr = omero.model.ProjectI()
pr.name = rstring(str(project))
pr = self.gateway.getUpdateService().saveAndReturnObject(pr)
returnVal = omero.gateway.ProjectWrapper(self.gateway, pr)
prId = pr.id.val
if dsId and prId:
link = omero.model.ProjectDatasetLinkI()
link.setParent(omero.model.ProjectI(prId, False))
link.setChild(omero.model.DatasetI(dsId, False))
link = self.gateway.getUpdateService().saveAndReturnObject(link)
returnVal = omero.gateway.BlitzObjectWrapper(self.gateway, link)
return returnVal
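    # Illustrative usage sketch, not part of the original file: linking a new
    # project and dataset by name on a logged-in gateway:
    #
    #     self.loginAsAuthor()
    #     pd_link = self.createPDTree(project="p1", dataset="d1")
    #     ds = self.createPDTree(dataset="d2")   # returns a DatasetWrapper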
def createTestImage(self, imageName="testImage", dataset=None, sizeX=16,
sizeY=16, sizeZ=1, sizeC=1, sizeT=1):
"""
        Creates a test image of the required dimensions, where each pixel
        value is set to its row index. If dataset (object, wrapper or ID)
        is specified, the new image will be linked to it.
@param dataset: omero.model.DatasetI
OR DatasetWrapper
OR dataset ID
"""
from numpy import fromfunction, int16
def f(x, y):
return x
def planeGen():
for p in range(sizeZ * sizeC * sizeT):
yield fromfunction(f, (sizeY, sizeX), dtype=int16)
ds = None
if dataset is not None:
if hasattr(dataset, "_obj"):
dataset = dataset._obj
if isinstance(dataset, omero.model.DatasetI):
ds = dataset
else:
try:
dsId = long(dataset)
ds = omero.model.DatasetI(dsId, False)
except:
pass
image = self.gateway.createImageFromNumpySeq(
planeGen(), imageName, sizeZ=sizeZ, sizeC=sizeC, sizeT=sizeT,
dataset=ds)
return image
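    # Illustrative usage sketch, not part of the original file: creating a
    # small 3-channel image inside an existing dataset wrapper:
    #
    #     ds = self.getTestDataset()
    #     img = self.createTestImage("demo", dataset=ds, sizeX=8, sizeY=8,
    #                                sizeC=3)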
| gpl-2.0 |
blacklin/kbengine | kbe/src/lib/python/Lib/test/test_pickle.py | 72 | 4522 | import pickle
import io
import collections
from test import support
from test.pickletester import AbstractPickleTests
from test.pickletester import AbstractPickleModuleTests
from test.pickletester import AbstractPersistentPicklerTests
from test.pickletester import AbstractPicklerUnpicklerObjectTests
from test.pickletester import AbstractDispatchTableTests
from test.pickletester import BigmemPickleTests
try:
import _pickle
has_c_implementation = True
except ImportError:
has_c_implementation = False
class PickleTests(AbstractPickleModuleTests):
pass
class PyPicklerTests(AbstractPickleTests):
pickler = pickle._Pickler
unpickler = pickle._Unpickler
def dumps(self, arg, proto=None):
f = io.BytesIO()
p = self.pickler(f, proto)
p.dump(arg)
f.seek(0)
return bytes(f.read())
def loads(self, buf, **kwds):
f = io.BytesIO(buf)
u = self.unpickler(f, **kwds)
return u.load()
class InMemoryPickleTests(AbstractPickleTests, BigmemPickleTests):
pickler = pickle._Pickler
unpickler = pickle._Unpickler
def dumps(self, arg, protocol=None):
return pickle.dumps(arg, protocol)
def loads(self, buf, **kwds):
return pickle.loads(buf, **kwds)
class PyPersPicklerTests(AbstractPersistentPicklerTests):
pickler = pickle._Pickler
unpickler = pickle._Unpickler
def dumps(self, arg, proto=None):
class PersPickler(self.pickler):
def persistent_id(subself, obj):
return self.persistent_id(obj)
f = io.BytesIO()
p = PersPickler(f, proto)
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, buf, **kwds):
class PersUnpickler(self.unpickler):
def persistent_load(subself, obj):
return self.persistent_load(obj)
f = io.BytesIO(buf)
u = PersUnpickler(f, **kwds)
return u.load()
class PyPicklerUnpicklerObjectTests(AbstractPicklerUnpicklerObjectTests):
pickler_class = pickle._Pickler
unpickler_class = pickle._Unpickler
class PyDispatchTableTests(AbstractDispatchTableTests):
pickler_class = pickle._Pickler
def get_dispatch_table(self):
return pickle.dispatch_table.copy()
class PyChainDispatchTableTests(AbstractDispatchTableTests):
pickler_class = pickle._Pickler
def get_dispatch_table(self):
return collections.ChainMap({}, pickle.dispatch_table)
if has_c_implementation:
class CPicklerTests(PyPicklerTests):
pickler = _pickle.Pickler
unpickler = _pickle.Unpickler
class CPersPicklerTests(PyPersPicklerTests):
pickler = _pickle.Pickler
unpickler = _pickle.Unpickler
class CDumpPickle_LoadPickle(PyPicklerTests):
pickler = _pickle.Pickler
unpickler = pickle._Unpickler
class DumpPickle_CLoadPickle(PyPicklerTests):
pickler = pickle._Pickler
unpickler = _pickle.Unpickler
class CPicklerUnpicklerObjectTests(AbstractPicklerUnpicklerObjectTests):
pickler_class = _pickle.Pickler
unpickler_class = _pickle.Unpickler
def test_issue18339(self):
unpickler = self.unpickler_class(io.BytesIO())
with self.assertRaises(TypeError):
unpickler.memo = object
# used to cause a segfault
with self.assertRaises(ValueError):
unpickler.memo = {-1: None}
unpickler.memo = {1: None}
class CDispatchTableTests(AbstractDispatchTableTests):
pickler_class = pickle.Pickler
def get_dispatch_table(self):
return pickle.dispatch_table.copy()
class CChainDispatchTableTests(AbstractDispatchTableTests):
pickler_class = pickle.Pickler
def get_dispatch_table(self):
return collections.ChainMap({}, pickle.dispatch_table)
def test_main():
tests = [PickleTests, PyPicklerTests, PyPersPicklerTests,
PyDispatchTableTests, PyChainDispatchTableTests]
if has_c_implementation:
tests.extend([CPicklerTests, CPersPicklerTests,
CDumpPickle_LoadPickle, DumpPickle_CLoadPickle,
PyPicklerUnpicklerObjectTests,
CPicklerUnpicklerObjectTests,
CDispatchTableTests, CChainDispatchTableTests,
InMemoryPickleTests])
support.run_unittest(*tests)
support.run_doctest(pickle)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
mottosso/pyblish-magenta | pyblish_magenta/vendor/cquery/vendor/nose/result.py | 68 | 6711 | """
Test Result
-----------
Provides a TextTestResult that extends unittest's _TextTestResult to
provide support for error classes (such as the builtin skip and
deprecated classes), and hooks for plugins to take over or extend
reporting.
"""
import logging
try:
# 2.7+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
from nose.config import Config
from nose.util import isclass, ln as _ln # backwards compat
log = logging.getLogger('nose.result')
def _exception_detail(exc):
# this is what stdlib module traceback does
try:
return str(exc)
except:
return '<unprintable %s object>' % type(exc).__name__
class TextTestResult(_TextTestResult):
"""Text test result that extends unittest's default test result
support for a configurable set of errorClasses (eg, Skip,
Deprecated, TODO) that extend the errors/failures/success triad.
"""
def __init__(self, stream, descriptions, verbosity, config=None,
errorClasses=None):
if errorClasses is None:
errorClasses = {}
self.errorClasses = errorClasses
if config is None:
config = Config()
self.config = config
_TextTestResult.__init__(self, stream, descriptions, verbosity)
def addSkip(self, test, reason):
# 2.7 skip compat
from nose.plugins.skip import SkipTest
if SkipTest in self.errorClasses:
storage, label, isfail = self.errorClasses[SkipTest]
storage.append((test, reason))
self.printLabel(label, (SkipTest, reason, None))
def addError(self, test, err):
"""Overrides normal addError to add support for
errorClasses. If the exception is a registered class, the
error will be added to the list for that class, not errors.
"""
ec, ev, tb = err
try:
exc_info = self._exc_info_to_string(err, test)
except TypeError:
# 2.3 compat
exc_info = self._exc_info_to_string(err)
for cls, (storage, label, isfail) in self.errorClasses.items():
#if 'Skip' in cls.__name__ or 'Skip' in ec.__name__:
# from nose.tools import set_trace
# set_trace()
if isclass(ec) and issubclass(ec, cls):
if isfail:
test.passed = False
storage.append((test, exc_info))
self.printLabel(label, err)
return
self.errors.append((test, exc_info))
test.passed = False
self.printLabel('ERROR')
# override to bypass changes in 2.7
def getDescription(self, test):
if self.descriptions:
return test.shortDescription() or str(test)
else:
return str(test)
def printLabel(self, label, err=None):
# Might get patched into a streamless result
stream = getattr(self, 'stream', None)
if stream is not None:
if self.showAll:
message = [label]
if err:
detail = _exception_detail(err[1])
if detail:
message.append(detail)
stream.writeln(": ".join(message))
elif self.dots:
stream.write(label[:1])
def printErrors(self):
"""Overrides to print all errorClasses errors as well.
"""
_TextTestResult.printErrors(self)
for cls in self.errorClasses.keys():
storage, label, isfail = self.errorClasses[cls]
if isfail:
self.printErrorList(label, storage)
# Might get patched into a result with no config
if hasattr(self, 'config'):
self.config.plugins.report(self.stream)
def printSummary(self, start, stop):
"""Called by the test runner to print the final summary of test
run results.
"""
write = self.stream.write
writeln = self.stream.writeln
taken = float(stop - start)
run = self.testsRun
plural = run != 1 and "s" or ""
writeln(self.separator2)
writeln("Ran %s test%s in %.3fs" % (run, plural, taken))
writeln()
summary = {}
eckeys = self.errorClasses.keys()
for cls in eckeys:
storage, label, isfail = self.errorClasses[cls]
count = len(storage)
if not count:
continue
summary[label] = count
if len(self.failures):
summary['failures'] = len(self.failures)
if len(self.errors):
summary['errors'] = len(self.errors)
if not self.wasSuccessful():
write("FAILED")
else:
write("OK")
items = summary.items()
if items:
items.sort()
write(" (")
write(", ".join(["%s=%s" % (label, count) for
label, count in items]))
writeln(")")
else:
writeln()
def wasSuccessful(self):
"""Overrides to check that there are no errors in errorClasses
lists that are marked as errors and should cause a run to
fail.
"""
if self.errors or self.failures:
return False
for cls in self.errorClasses.keys():
storage, label, isfail = self.errorClasses[cls]
if not isfail:
continue
if storage:
return False
return True
def _addError(self, test, err):
try:
exc_info = self._exc_info_to_string(err, test)
except TypeError:
# 2.3: does not take test arg
exc_info = self._exc_info_to_string(err)
self.errors.append((test, exc_info))
if self.showAll:
self.stream.write('ERROR')
elif self.dots:
self.stream.write('E')
def _exc_info_to_string(self, err, test=None):
# 2.7 skip compat
from nose.plugins.skip import SkipTest
if isclass(err[0]) and issubclass(err[0], SkipTest):
return str(err[1])
# 2.3/2.4 -- 2.4 passes test, 2.3 does not
try:
return _TextTestResult._exc_info_to_string(self, err, test)
except TypeError:
# 2.3: does not take test arg
return _TextTestResult._exc_info_to_string(self, err)
def ln(*arg, **kw):
from warnings import warn
warn("ln() has moved to nose.util from nose.result and will be removed "
"from nose.result in a future release. Please update your imports ",
DeprecationWarning)
return _ln(*arg, **kw)
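# Illustrative sketch, not part of the original module: plugins register
# error classes as {ExceptionClass: (storage_list, label, isfail)}; a result
# that records a hypothetical TodoError without failing the run might be
# built like:
#
#     errorClasses = {TodoError: ([], 'TODO', False)}
#     result = TextTestResult(stream, descriptions=1, verbosity=2,
#                             errorClasses=errorClasses)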
| lgpl-3.0 |
ivankelly/Rhythmbox-Spotify-Plugin | plugins/rb/__init__.py | 2 | 3597 | # -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright 2006, James Livingston <[email protected]>
# Copyright 2006, Ed Catmur <[email protected]>
# Copyright 2007, Jonathan Matthew
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os.path
import os
import gtk
# rb classes
from Loader import Loader
from Loader import ChunkLoader
from Loader import UpdateCheck
from Coroutine import Coroutine
#def _excepthandler (exc_class, exc_inst, trace):
# import sys
# # print out stuff ignoring our debug redirect
# sys.__excepthook__ (exc_class, exc_inst, trace)
def try_load_icon(theme, icon, size, flags):
try:
return theme.load_icon(icon, size, flags)
except:
return None
def append_plugin_source_path(theme, iconpath):
# check for a Makefile.am in the dir the file was loaded from
fr = sys._getframe(1)
co = fr.f_code
filename = co.co_filename
# and if found, append the icon path
dir = filename[:filename.rfind(os.sep)]
if os.path.exists(dir + "/Makefile.am"):
plugindir = dir[:dir.rfind(os.sep)]
icondir = plugindir + iconpath
theme.append_search_path(icondir)
def show_uri(uri):
# use gtk_show_uri if available, otherwise use gnome-vfs
if hasattr(gtk, 'show_uri'):
gtk.show_uri(gtk.gdk.Screen(), uri, 0)
else:
import gnomevfs
gnomevfs.url_show(uri)
class _rbdebugfile:
def __init__(self, fn):
self.fn = fn
def write(self, str):
if str == '\n':
return
import rb
fr = sys._getframe(1)
co = fr.f_code
filename = co.co_filename
# strip off the cwd, for if running uninstalled
cwd = os.getcwd()
if cwd[-1] != os.sep:
cwd += os.sep
if filename[:len(cwd)] == cwd:
filename = filename[len(cwd):]
# add the class name to the method, if 'self' exists
methodname = co.co_name
if fr.f_locals.has_key('self'):
methodname = '%s.%s' % (fr.f_locals['self'].__class__.__name__, methodname)
rb._debug (methodname, filename, co.co_firstlineno + fr.f_lineno, True, str)
def close(self): pass
def flush(self): pass
def fileno(self): return self.fn
def isatty(self): return 0
def read(self, a): return ''
def readline(self): return ''
def readlines(self): return []
writelines = write
def seek(self, a): raise IOError, (29, 'Illegal seek')
def tell(self): raise IOError, (29, 'Illegal seek')
truncate = tell
sys.stdout = _rbdebugfile(sys.stdout.fileno())
#sys.excepthook = _excepthandler
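# Illustrative note, not part of the original module: once this module is
# imported, plain print statements in plugin code are routed through
# rb._debug with the caller's class, method, file and line attached:
#
#     print "loaded station list"   # appears in Rhythmbox debug output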
| gpl-2.0 |
Intel-bigdata/s3-tests | s3tests/functional/__init__.py | 3 | 12312 | import ConfigParser
import boto.exception
import boto.s3.connection
import bunch
import itertools
import os
import random
import string
from .utils import region_sync_meta
s3 = bunch.Bunch()
config = bunch.Bunch()
targets = bunch.Bunch()
# this will be assigned by setup()
prefix = None
calling_formats = dict(
ordinary=boto.s3.connection.OrdinaryCallingFormat(),
subdomain=boto.s3.connection.SubdomainCallingFormat(),
vhost=boto.s3.connection.VHostCallingFormat(),
)
def get_prefix():
assert prefix is not None
return prefix
def is_slow_backend():
return slow_backend
def choose_bucket_prefix(template, max_len=30):
"""
Choose a prefix for our test buckets, so they're easy to identify.
Use template and feed it more and more random filler, until it's
as long as possible but still below max_len.
"""
rand = ''.join(
random.choice(string.ascii_lowercase + string.digits)
for c in range(255)
)
while rand:
s = template.format(random=rand)
if len(s) <= max_len:
return s
rand = rand[:-1]
raise RuntimeError(
'Bucket prefix template is impossible to fulfill: {template!r}'.format(
template=template,
),
)
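# Worked example, not part of the original module: with the default template
# 'test-{random}-' and max_len=30, choose_bucket_prefix() trims the
# 255-character random filler one character at a time until the formatted
# name fits, yielding something like 'test-<24 random chars>-'.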
def nuke_prefixed_buckets_on_conn(prefix, name, conn):
print 'Cleaning buckets from connection {name} prefix {prefix!r}.'.format(
name=name,
prefix=prefix,
)
for bucket in conn.get_all_buckets():
print 'prefix=',prefix
if bucket.name.startswith(prefix):
print 'Cleaning bucket {bucket}'.format(bucket=bucket)
success = False
for i in xrange(2):
try:
try:
iterator = iter(bucket.list_versions())
# peek into iterator to issue list operation
try:
keys = itertools.chain([next(iterator)], iterator)
except StopIteration:
keys = [] # empty iterator
except boto.exception.S3ResponseError as e:
# some S3 implementations do not support object
# versioning - fall back to listing without versions
if e.error_code != 'NotImplemented':
raise e
keys = bucket.list();
for key in keys:
print 'Cleaning bucket {bucket} key {key}'.format(
bucket=bucket,
key=key,
)
# key.set_canned_acl('private')
bucket.delete_key(key.name, version_id = key.version_id)
bucket.delete()
success = True
except boto.exception.S3ResponseError as e:
if e.error_code != 'AccessDenied':
print 'GOT UNWANTED ERROR', e.error_code
raise
# seems like we don't have permissions set appropriately, we'll
# modify permissions and retry
pass
if success:
return
bucket.set_canned_acl('private')
def nuke_prefixed_buckets(prefix):
# If no regions are specified, use the simple method
    if targets.main.master is None:
for name, conn in s3.items():
print 'Deleting buckets on {name}'.format(name=name)
nuke_prefixed_buckets_on_conn(prefix, name, conn)
else:
# First, delete all buckets on the master connection
for name, conn in s3.items():
if conn == targets.main.master.connection:
print 'Deleting buckets on {name} (master)'.format(name=name)
nuke_prefixed_buckets_on_conn(prefix, name, conn)
# Then sync to propagate deletes to secondaries
region_sync_meta(targets.main, targets.main.master.connection)
print 'region-sync in nuke_prefixed_buckets'
# Now delete remaining buckets on any other connection
for name, conn in s3.items():
if conn != targets.main.master.connection:
print 'Deleting buckets on {name} (non-master)'.format(name=name)
nuke_prefixed_buckets_on_conn(prefix, name, conn)
print 'Done with cleanup of test buckets.'
class TargetConfig:
def __init__(self, cfg, section):
self.port = None
self.api_name = ''
self.is_master = False
self.is_secure = False
self.sync_agent_addr = None
self.sync_agent_port = 0
self.sync_meta_wait = 0
try:
self.api_name = cfg.get(section, 'api_name')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
pass
try:
self.port = cfg.getint(section, 'port')
except ConfigParser.NoOptionError:
pass
try:
self.host=cfg.get(section, 'host')
except ConfigParser.NoOptionError:
raise RuntimeError(
'host not specified for section {s}'.format(s=section)
)
try:
self.is_master=cfg.getboolean(section, 'is_master')
except ConfigParser.NoOptionError:
pass
try:
self.is_secure=cfg.getboolean(section, 'is_secure')
except ConfigParser.NoOptionError:
pass
try:
raw_calling_format = cfg.get(section, 'calling_format')
except ConfigParser.NoOptionError:
raw_calling_format = 'ordinary'
try:
self.sync_agent_addr = cfg.get(section, 'sync_agent_addr')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
pass
try:
self.sync_agent_port = cfg.getint(section, 'sync_agent_port')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
pass
try:
self.sync_meta_wait = cfg.getint(section, 'sync_meta_wait')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
pass
try:
self.calling_format = calling_formats[raw_calling_format]
except KeyError:
raise RuntimeError(
'calling_format unknown: %r' % raw_calling_format
)
class TargetConnection:
def __init__(self, conf, conn):
self.conf = conf
self.connection = conn
class RegionsInfo:
def __init__(self):
self.m = bunch.Bunch()
self.master = None
self.secondaries = []
def add(self, name, region_config):
self.m[name] = region_config
if (region_config.is_master):
if not self.master is None:
raise RuntimeError(
'multiple regions defined as master'
)
self.master = region_config
else:
self.secondaries.append(region_config)
    def get(self, name=None):
        if name is not None:
            return self.m[name]
        return self.m
def iteritems(self):
return self.m.iteritems()
regions = RegionsInfo()
class RegionsConn:
def __init__(self):
self.m = bunch.Bunch()
self.default = None
self.master = None
self.secondaries = []
def iteritems(self):
return self.m.iteritems()
def set_default(self, conn):
self.default = conn
def add(self, name, conn):
self.m[name] = conn
if not self.default:
self.default = conn
if (conn.conf.is_master):
self.master = conn
else:
self.secondaries.append(conn)
# nosetests --processes=N with N>1 is safe
_multiprocess_can_split_ = True
def setup():
cfg = ConfigParser.RawConfigParser()
try:
path = os.environ['S3TEST_CONF']
except KeyError:
raise RuntimeError(
'To run tests, point environment '
+ 'variable S3TEST_CONF to a config file.',
)
with file(path) as f:
cfg.readfp(f)
global prefix
global targets
global slow_backend
try:
template = cfg.get('fixtures', 'bucket prefix')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
template = 'test-{random}-'
prefix = choose_bucket_prefix(template=template)
try:
slow_backend = cfg.getboolean('fixtures', 'slow backend')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
slow_backend = False
# pull the default_region out, if it exists
try:
default_region = cfg.get('fixtures', 'default_region')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
default_region = None
s3.clear()
config.clear()
for section in cfg.sections():
try:
(type_, name) = section.split(None, 1)
except ValueError:
continue
if type_ != 'region':
continue
regions.add(name, TargetConfig(cfg, section))
for section in cfg.sections():
try:
(type_, name) = section.split(None, 1)
except ValueError:
continue
if type_ != 's3':
continue
if len(regions.get()) == 0:
regions.add("default", TargetConfig(cfg, section))
config[name] = bunch.Bunch()
for var in [
'user_id',
'display_name',
'email',
]:
try:
config[name][var] = cfg.get(section, var)
except ConfigParser.NoOptionError:
pass
targets[name] = RegionsConn()
for (k, conf) in regions.iteritems():
conn = boto.s3.connection.S3Connection(
aws_access_key_id=cfg.get(section, 'access_key'),
aws_secret_access_key=cfg.get(section, 'secret_key'),
is_secure=conf.is_secure,
port=conf.port,
host=conf.host,
# TODO test vhost calling format
calling_format=conf.calling_format,
)
temp_targetConn = TargetConnection(conf, conn)
targets[name].add(k, temp_targetConn)
# Explicitly test for and set the default region, if specified.
# If it was not specified, use the 'is_master' flag to set it.
if default_region:
if default_region == name:
targets[name].set_default(temp_targetConn)
elif conf.is_master:
targets[name].set_default(temp_targetConn)
s3[name] = targets[name].default.connection
# WARNING! we actively delete all buckets we see with the prefix
# we've chosen! Choose your prefix with care, and don't reuse
# credentials!
# We also assume nobody else is going to use buckets with that
# prefix. This is racy but given enough randomness, should not
# really fail.
nuke_prefixed_buckets(prefix=prefix)
def teardown():
# remove our buckets here also, to avoid littering
nuke_prefixed_buckets(prefix=prefix)
bucket_counter = itertools.count(1)
def get_new_bucket_name():
"""
Get a bucket name that probably does not exist.
We make every attempt to use a unique random prefix, so if a
bucket by this name happens to exist, it's ok if tests give
false negatives.
"""
name = '{prefix}{num}'.format(
prefix=prefix,
num=next(bucket_counter),
)
return name
def get_new_bucket(target=None, name=None, headers=None):
"""
Get a bucket that exists and is empty.
Always recreates a bucket from scratch. This is useful to also
reset ACLs and such.
"""
if target is None:
target = targets.main.default
connection = target.connection
if name is None:
name = get_new_bucket_name()
# the only way for this to fail with a pre-existing bucket is if
# someone raced us between setup nuke_prefixed_buckets and here;
# ignore that as astronomically unlikely
bucket = connection.create_bucket(name, location=target.conf.api_name, headers=headers)
return bucket
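# Illustrative usage sketch, not part of the original module: tests usually
# grab a fresh bucket from the default target and rely on setup()/teardown()
# to nuke anything left behind:
#
#     bucket = get_new_bucket()
#     key = bucket.new_key('foo')
#     key.set_contents_from_string('bar')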
| mit |
imtapps/django-imt-fork | django/db/backends/__init__.py | 52 | 38612 | from django.db.utils import DatabaseError
try:
from django.utils.six.moves import _thread as thread
except ImportError:
from django.utils.six.moves import _dummy_thread as thread
from contextlib import contextmanager
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.importlib import import_module
from django.utils import six
from django.utils.timezone import is_aware
class BaseDatabaseWrapper(object):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
self.use_debug_cursor = None
# Transaction related attributes
self.transaction_state = []
self.savepoint_state = 0
self._dirty = None
self._thread_ident = thread.get_ident()
self.allow_thread_sharing = allow_thread_sharing
def __eq__(self, other):
return self.alias == other.alias
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.alias)
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def abort(self):
"""
Roll back any ongoing transaction and clean the transaction state
stack.
"""
if self._dirty:
self._rollback()
self._dirty = False
while self.transaction_state:
self.leave_transaction_management()
def enter_transaction_management(self, managed=True):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if self.transaction_state:
self.transaction_state.append(self.transaction_state[-1])
else:
self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
if self._dirty is None:
self._dirty = False
self._enter_transaction_management(managed)
def leave_transaction_management(self):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError(
"This code isn't under transaction management")
# We will pass the next status (after leaving the previous state
# behind) to subclass hook.
self._leave_transaction_management(self.is_managed())
if self._dirty:
self.rollback()
raise TransactionManagementError(
"Transaction managed block ended with pending COMMIT/ROLLBACK")
self._dirty = False
def validate_thread_sharing(self):
"""
Validates that the connection isn't accessed by another thread than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raises an exception if the validation fails.
"""
if (not self.allow_thread_sharing
and self._thread_ident != thread.get_ident()):
raise DatabaseError("DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, thread.get_ident()))
def is_dirty(self):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return self._dirty
def set_dirty(self):
"""
        Sets a dirty flag for the current thread and code streak. This can be
        used in a managed block of code to decide whether there are open
        changes waiting for commit.
"""
if self._dirty is not None:
self._dirty = True
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def set_clean(self):
"""
        Resets a dirty flag for the current thread and code streak. This can
        be used in a managed block of code to decide whether a commit or
        rollback should happen.
"""
if self._dirty is not None:
self._dirty = False
else:
raise TransactionManagementError("This code isn't under transaction management")
self.clean_savepoints()
def clean_savepoints(self):
self.savepoint_state = 0
def is_managed(self):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if self.transaction_state:
return self.transaction_state[-1]
return settings.TRANSACTIONS_MANAGED
def managed(self, flag=True):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
        committed.
"""
top = self.transaction_state
if top:
top[-1] = flag
if not flag and self.is_dirty():
self._commit()
self.set_clean()
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def commit_unless_managed(self):
"""
Commits changes if the system is not in managed transaction mode.
"""
self.validate_thread_sharing()
if not self.is_managed():
self._commit()
self.clean_savepoints()
else:
self.set_dirty()
def rollback_unless_managed(self):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
self.validate_thread_sharing()
if not self.is_managed():
self._rollback()
else:
self.set_dirty()
def commit(self):
"""
Does the commit itself and resets the dirty flag.
"""
self.validate_thread_sharing()
self._commit()
self.set_clean()
def rollback(self):
"""
This function does the rollback itself and resets the dirty flag.
"""
self.validate_thread_sharing()
self._rollback()
self.set_clean()
def savepoint(self):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
thread_ident = thread.get_ident()
self.savepoint_state += 1
tid = str(thread_ident).replace('-', '')
sid = "s%s_x%d" % (tid, self.savepoint_state)
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
        Rolls back to the given savepoint (if one exists). Does nothing if
        savepoints are not supported.
"""
self.validate_thread_sharing()
if self.savepoint_state:
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
        Commits the given savepoint (if one exists). Does nothing if
        savepoints are not supported.
"""
self.validate_thread_sharing()
if self.savepoint_state:
self._savepoint_commit(sid)
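    # Illustrative usage sketch, not part of the original file: the savepoint
    # API is used as a token-passing pattern inside a managed transaction:
    #
    #     sid = connection.savepoint()
    #     try:
    #         ...                               # risky statements
    #         connection.savepoint_commit(sid)
    #     except DatabaseError:
    #         connection.savepoint_rollback(sid)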
@contextmanager
def constraint_checks_disabled(self):
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
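    # Illustrative usage sketch, not part of the original file: callers wrap
    # statements that would trip foreign key checks (the names below are
    # hypothetical):
    #
    #     with connection.constraint_checks_disabled():
    #         load_objects_with_forward_references()
    #     connection.check_constraints(table_names=affected_tables)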
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key constraint
checking.
"""
pass
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE"). Should raise an IntegrityError if any invalid foreign key references are encountered.
"""
pass
def close(self):
self.validate_thread_sharing()
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
self.validate_thread_sharing()
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = util.CursorWrapper(self._cursor(), self)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists, but one of the unique_together columns is NULL?
ignores_nulls_in_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_autocommit = False
uses_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does a dirty transaction need to be rolled back
# before the cursor can be used again?
requires_rollback_on_dirty_transaction = False
# Does the backend allow very long model names without error?
supports_long_model_names = True
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have a primary key of 0? MySQL says No.
allows_primary_key_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_transactions(self):
"Confirm support for transactions"
try:
# Make sure to run inside a managed transaction block,
# otherwise autocommit will cause the confimation to
# fail.
self.connection.enter_transaction_management()
self.connection.managed(True)
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection._commit()
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection._rollback()
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
self.connection._commit()
self.connection._dirty = False
finally:
self.connection.leave_transaction_management()
return count == 0
@cached_property
def supports_stddev(self):
"Confirm support for STDDEV and related stats functions"
class StdDevPop(object):
sql_function = 'STDDEV_POP'
try:
self.connection.ops.check_aggregate_support(StdDevPop())
return True
except NotImplementedError:
return False
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns a SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
        result set. If any fields are given, only the given fields are
        checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import force_text
# Convert params to contain Unicode values.
to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return force_text(sql) % u_params
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
        Returns the value to use for the LIMIT when we want "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import force_text
        return force_text(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_time(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a field value using a year lookup.
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
def year_lookup_bounds_for_date_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a DateField value using a year lookup.
`value` is an int, containing the looked-up year.
By default, it just calls `self.year_lookup_bounds`. Some backends need
this hook because on their DB date fields can't be compared to values
which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent type
that is compatible with the field type.
"""
if value is None:
return value
internal_type = field.get_internal_type()
if internal_type == 'FloatField':
return float(value)
elif (internal_type and (internal_type.endswith('IntegerField')
or internal_type == 'AutoField')):
return int(value)
return value
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
def modify_insert_params(self, placeholders, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
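# A minimal sketch of how a concrete backend might override the hooks
# above; ``ExampleOperations`` is hypothetical, though the overrides shown
# mirror real backends (SQLite returns -1 from no_limit_value(), MySQL
# spells the random function RAND()):
#
#     class ExampleOperations(BaseDatabaseOperations):
#         def quote_name(self, name):
#             # Don't double-quote a name that is already quoted.
#             if name.startswith('"') and name.endswith('"'):
#                 return name
#             return '"%s"' % name
#
#         def no_limit_value(self):
#             return -1
#
#         def random_function_sql(self):
#             return 'RAND()'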
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self, cursor=None):
"""
Returns a list of names of all tables that exist in the database.
The returned table list is sorted by Python's default sorting. We
do NOT use database's ORDER BY here to avoid subtle differences
in sorting order between databases.
"""
if cursor is None:
cursor = self.connection.cursor()
return sorted(self.get_table_list(cursor))
def get_table_list(self, cursor):
"""
Returns an unsorted list of names of all tables that exist in the
database.
"""
raise NotImplementedError
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
tables = list(tables)
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
tables = list(map(self.table_name_converter, tables))
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if model._meta.swapped:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
raise NotImplementedError
def get_primary_key_column(self, cursor, table_name):
"""
Returns the name of the primary key column for the given table.
"""
for column in six.iteritems(self.get_indexes(cursor, table_name)):
if column[1]['primary_key']:
return column[0]
return None
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
Only single-column indexes are introspected.
"""
raise NotImplementedError
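# Sketch of the minimal surface a backend's introspection class has to
# provide; the catalog query below is hypothetical and differs per engine:
#
#     class ExampleIntrospection(BaseDatabaseIntrospection):
#         def get_table_list(self, cursor):
#             cursor.execute("SELECT table_name FROM example_catalog.tables")
#             return [row[0] for row in cursor.fetchall()]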
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
    This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
| bsd-3-clause |
westinedu/newertrends | django/views/generic/date_based.py | 246 | 14025 | import datetime
import time
from django.template import loader, RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.core.xheaders import populate_xheaders
from django.db.models.fields import DateTimeField
from django.http import Http404, HttpResponse
import warnings
warnings.warn(
'Function-based generic views have been deprecated; use class-based views instead.',
PendingDeprecationWarning
)
def archive_index(request, queryset, date_field, num_latest=15,
template_name=None, template_loader=loader,
extra_context=None, allow_empty=True, context_processors=None,
mimetype=None, allow_future=False, template_object_name='latest'):
"""
Generic top-level archive of date-based objects.
Templates: ``<app_label>/<model_name>_archive.html``
Context:
date_list
List of years
latest
Latest N (defaults to 15) objects by date
"""
if extra_context is None: extra_context = {}
model = queryset.model
if not allow_future:
queryset = queryset.filter(**{'%s__lte' % date_field: datetime.datetime.now()})
date_list = queryset.dates(date_field, 'year')[::-1]
if not date_list and not allow_empty:
raise Http404("No %s available" % model._meta.verbose_name)
if date_list and num_latest:
latest = queryset.order_by('-'+date_field)[:num_latest]
else:
latest = None
if not template_name:
template_name = "%s/%s_archive.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list' : date_list,
template_object_name : latest,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
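# These function-based views are typically wired up through a URLconf;
# ``Article`` and ``pub_date`` below are hypothetical stand-ins for a real
# model and date field:
#
#     info_dict = {'queryset': Article.objects.all(), 'date_field': 'pub_date'}
#     urlpatterns = patterns('django.views.generic.date_based',
#         (r'^$', 'archive_index', info_dict),
#         (r'^(?P<year>\d{4})/$', 'archive_year', info_dict),
#     )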
def archive_year(request, year, queryset, date_field, template_name=None,
template_loader=loader, extra_context=None, allow_empty=False,
context_processors=None, template_object_name='object', mimetype=None,
make_object_list=False, allow_future=False):
"""
Generic yearly archive view.
Templates: ``<app_label>/<model_name>_archive_year.html``
Context:
date_list
List of months in this year with objects
year
This year
object_list
List of objects published in the given month
(Only available if make_object_list argument is True)
"""
if extra_context is None: extra_context = {}
model = queryset.model
now = datetime.datetime.now()
lookup_kwargs = {'%s__year' % date_field: year}
# Only bother to check current date if the year isn't in the past and future objects aren't requested.
if int(year) >= now.year and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
date_list = queryset.filter(**lookup_kwargs).dates(date_field, 'month')
if not date_list and not allow_empty:
raise Http404
if make_object_list:
object_list = queryset.filter(**lookup_kwargs)
else:
object_list = []
if not template_name:
template_name = "%s/%s_archive_year.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list': date_list,
'year': year,
'%s_list' % template_object_name: object_list,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_month(request, year, month, queryset, date_field,
month_format='%b', template_name=None, template_loader=loader,
extra_context=None, allow_empty=False, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic monthly archive view.
Templates: ``<app_label>/<model_name>_archive_month.html``
Context:
date_list:
List of days in this month with objects
month:
(date) this month
next_month:
(date) the first day of the next month, or None if the next month is in the future
previous_month:
(date) the first day of the previous month
object_list:
list of objects published in the given month
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime("%s-%s" % (year, month), '%s-%s' % ('%Y', month_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
# Calculate first and last day of month, for use in a date-range lookup.
first_day = date.replace(day=1)
if first_day.month == 12:
last_day = first_day.replace(year=first_day.year + 1, month=1)
else:
last_day = first_day.replace(month=first_day.month + 1)
lookup_kwargs = {
'%s__gte' % date_field: first_day,
'%s__lt' % date_field: last_day,
}
    # Only bother to check current date if the month isn't in the past and future objects aren't requested.
if last_day >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
date_list = object_list.dates(date_field, 'day')
if not object_list and not allow_empty:
raise Http404
# Calculate the next month, if applicable.
if allow_future:
next_month = last_day
elif last_day <= datetime.date.today():
next_month = last_day
else:
next_month = None
# Calculate the previous month
if first_day.month == 1:
previous_month = first_day.replace(year=first_day.year-1,month=12)
else:
previous_month = first_day.replace(month=first_day.month-1)
if not template_name:
template_name = "%s/%s_archive_month.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list': date_list,
'%s_list' % template_object_name: object_list,
'month': date,
'next_month': next_month,
'previous_month': previous_month,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_week(request, year, week, queryset, date_field,
template_name=None, template_loader=loader,
extra_context=None, allow_empty=True, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic weekly archive view.
Templates: ``<app_label>/<model_name>_archive_week.html``
Context:
week:
(date) this week
object_list:
list of objects published in the given week
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime(year+'-0-'+week, '%Y-%w-%U')
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
# Calculate first and last day of week, for use in a date-range lookup.
first_day = date
last_day = date + datetime.timedelta(days=7)
lookup_kwargs = {
'%s__gte' % date_field: first_day,
'%s__lt' % date_field: last_day,
}
# Only bother to check current date if the week isn't in the past and future objects aren't requested.
if last_day >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
if not object_list and not allow_empty:
raise Http404
if not template_name:
template_name = "%s/%s_archive_week.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'week': date,
})
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_day(request, year, month, day, queryset, date_field,
month_format='%b', day_format='%d', template_name=None,
template_loader=loader, extra_context=None, allow_empty=False,
context_processors=None, template_object_name='object',
mimetype=None, allow_future=False):
"""
Generic daily archive view.
Templates: ``<app_label>/<model_name>_archive_day.html``
Context:
object_list:
list of objects published that day
day:
(datetime) the day
previous_day
(datetime) the previous day
next_day
(datetime) the next day, or None if the current day is today
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime('%s-%s-%s' % (year, month, day),
'%s-%s-%s' % ('%Y', month_format, day_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
if isinstance(model._meta.get_field(date_field), DateTimeField):
lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
else:
lookup_kwargs = {date_field: date}
# Only bother to check current date if the date isn't in the past and future objects aren't requested.
if date >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
if not allow_empty and not object_list:
raise Http404
# Calculate the next day, if applicable.
if allow_future:
next_day = date + datetime.timedelta(days=1)
elif date < datetime.date.today():
next_day = date + datetime.timedelta(days=1)
else:
next_day = None
if not template_name:
template_name = "%s/%s_archive_day.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'day': date,
'previous_day': date - datetime.timedelta(days=1),
'next_day': next_day,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_today(request, **kwargs):
"""
Generic daily archive view for today. Same as archive_day view.
"""
today = datetime.date.today()
kwargs.update({
'year': str(today.year),
'month': today.strftime('%b').lower(),
'day': str(today.day),
})
return archive_day(request, **kwargs)
def object_detail(request, year, month, day, queryset, date_field,
month_format='%b', day_format='%d', object_id=None, slug=None,
slug_field='slug', template_name=None, template_name_field=None,
template_loader=loader, extra_context=None, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic detail view from year/month/day/slug or year/month/day/id structure.
Templates: ``<app_label>/<model_name>_detail.html``
Context:
object:
the object to be detailed
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime('%s-%s-%s' % (year, month, day),
'%s-%s-%s' % ('%Y', month_format, day_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
if isinstance(model._meta.get_field(date_field), DateTimeField):
lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
else:
lookup_kwargs = {date_field: date}
# Only bother to check current date if the date isn't in the past and future objects aren't requested.
if date >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
if object_id:
lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
elif slug and slug_field:
lookup_kwargs['%s__exact' % slug_field] = slug
else:
raise AttributeError("Generic detail view must be called with either an object_id or a slug/slugfield")
try:
obj = queryset.get(**lookup_kwargs)
except ObjectDoesNotExist:
raise Http404("No %s found for" % model._meta.verbose_name)
if not template_name:
template_name = "%s/%s_detail.html" % (model._meta.app_label, model._meta.object_name.lower())
if template_name_field:
template_name_list = [getattr(obj, template_name_field), template_name]
t = template_loader.select_template(template_name_list)
else:
t = template_loader.get_template(template_name)
c = RequestContext(request, {
template_object_name: obj,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
response = HttpResponse(t.render(c), mimetype=mimetype)
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.name))
return response
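# A matching URLconf entry for object_detail needs either an object_id or
# a slug captured from the URL, e.g. (hypothetical pattern, reusing the
# info_dict sketched after archive_index above):
#
#     (r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/$',
#      'object_detail', dict(info_dict, slug_field='slug')),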
| bsd-3-clause |
keen99/SickRage | lib/github/CommitStatus.py | 49 | 4721 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class CommitStatus(github.GithubObject.NonCompletableGithubObject):
"""
    This class represents CommitStatuses as returned for example by http://developer.github.com/v3/todo
"""
@property
def created_at(self):
"""
:type: datetime.datetime
"""
return self._created_at.value
@property
def creator(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
return self._creator.value
@property
def description(self):
"""
:type: string
"""
return self._description.value
@property
def id(self):
"""
:type: integer
"""
return self._id.value
@property
def state(self):
"""
:type: string
"""
return self._state.value
@property
def target_url(self):
"""
:type: string
"""
return self._target_url.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
return self._url.value
def _initAttributes(self):
self._created_at = github.GithubObject.NotSet
self._creator = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._target_url = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "creator" in attributes: # pragma no branch
self._creator = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["creator"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "target_url" in attributes: # pragma no branch
self._target_url = self._makeStringAttribute(attributes["target_url"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
| gpl-3.0 |
xuweiliang/Codelibrary | novaclient/exceptions.py | 1 | 8967 | # Copyright 2010 Jacob Kaplan-Moss
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception definitions.
"""
class UnsupportedVersion(Exception):
"""Indicates that the user is trying to use an unsupported
version of the API.
"""
pass
class UnsupportedAttribute(AttributeError):
"""Indicates that the user is trying to transmit the argument to a method,
which is not supported by selected version.
"""
def __init__(self, argument_name, start_version, end_version=None):
if end_version:
self.message = (
"'%(name)s' argument is only allowed for microversions "
"%(start)s - %(end)s." % {"name": argument_name,
"start": start_version,
"end": end_version})
else:
self.message = (
"'%(name)s' argument is only allowed since microversion "
"%(start)s." % {"name": argument_name, "start": start_version})
class CommandError(Exception):
pass
class AuthorizationFailure(Exception):
pass
class NoUniqueMatch(Exception):
pass
class AuthSystemNotFound(Exception):
"""When the user specify a AuthSystem but not installed."""
def __init__(self, auth_system):
self.auth_system = auth_system
def __str__(self):
return "AuthSystemNotFound: %s" % repr(self.auth_system)
class NoTokenLookupException(Exception):
"""This form of authentication does not support looking up
endpoints from an existing token.
"""
pass
class EndpointNotFound(Exception):
"""Could not find Service or Region in Service Catalog."""
pass
class AmbiguousEndpoints(Exception):
"""Found more than one matching endpoint in Service Catalog."""
def __init__(self, endpoints=None):
self.endpoints = endpoints
def __str__(self):
return "AmbiguousEndpoints: %s" % repr(self.endpoints)
class ConnectionRefused(Exception):
"""
Connection refused: the server refused the connection.
"""
def __init__(self, response=None):
self.response = response
def __str__(self):
return "ConnectionRefused: %s" % repr(self.response)
class ResourceInErrorState(Exception):
"""Resource is in the error state."""
def __init__(self, obj):
msg = "`%s` resource is in the error state" % obj.__class__.__name__
fault_msg = getattr(obj, "fault", {}).get("message")
if fault_msg:
msg += "due to '%s'" % fault_msg
self.message = "%s." % msg
class VersionNotFoundForAPIMethod(Exception):
msg_fmt = "API version '%(vers)s' is not supported on '%(method)s' method."
def __init__(self, version, method):
self.version = version
self.method = method
def __str__(self):
return self.msg_fmt % {"vers": self.version, "method": self.method}
class InstanceInDeletedState(Exception):
"""Instance is in the deleted state."""
pass
class ClientException(Exception):
"""
The base exception class for all exceptions this library raises.
"""
message = 'Unknown Error'
def __init__(self, code, message=None, details=None, request_id=None,
url=None, method=None):
self.code = code
self.message = message or self.__class__.message
self.details = details
self.request_id = request_id
self.url = url
self.method = method
def __str__(self):
formatted_string = "%s (HTTP %s)" % (self.message, self.code)
if self.request_id:
formatted_string += " (Request-ID: %s)" % self.request_id
return formatted_string
class RetryAfterException(ClientException):
"""
The base exception class for ClientExceptions that use Retry-After header.
"""
def __init__(self, *args, **kwargs):
try:
self.retry_after = int(kwargs.pop('retry_after'))
except (KeyError, ValueError):
self.retry_after = 0
super(RetryAfterException, self).__init__(*args, **kwargs)
class BadRequest(ClientException):
"""
HTTP 400 - Bad request: you sent some malformed data.
"""
http_status = 400
message = "Bad request"
class Unauthorized(ClientException):
"""
HTTP 401 - Unauthorized: bad credentials.
"""
http_status = 401
message = "Unauthorized"
class Forbidden(ClientException):
"""
HTTP 403 - Forbidden: your credentials don't give you access to this
resource.
"""
http_status = 403
message = "Forbidden"
class NotFound(ClientException):
"""
HTTP 404 - Not found
"""
http_status = 404
message = "Not found"
class MethodNotAllowed(ClientException):
"""
HTTP 405 - Method Not Allowed
"""
http_status = 405
message = "Method Not Allowed"
class NotAcceptable(ClientException):
"""
HTTP 406 - Not Acceptable
"""
http_status = 406
message = "Not Acceptable"
class Conflict(ClientException):
"""
HTTP 409 - Conflict
"""
http_status = 409
message = "Conflict"
class OverLimit(RetryAfterException):
"""
HTTP 413 - Over limit: you're over the API limits for this time period.
"""
http_status = 413
message = "Over limit"
class RateLimit(RetryAfterException):
"""
HTTP 429 - Rate limit: you've sent too many requests for this time period.
"""
http_status = 429
message = "Rate limit"
# NotImplemented is a python keyword.
class HTTPNotImplemented(ClientException):
"""
HTTP 501 - Not Implemented: the server does not support this operation.
"""
http_status = 501
message = "Not Implemented"
# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__()
# so we can do this:
# _code_map = dict((c.http_status, c)
# for c in ClientException.__subclasses__())
#
# Instead, we have to hardcode it:
_error_classes = [BadRequest, Unauthorized, Forbidden, NotFound,
MethodNotAllowed, NotAcceptable, Conflict, OverLimit,
RateLimit, HTTPNotImplemented]
_code_map = dict((c.http_status, c) for c in _error_classes)
class InvalidUsage(RuntimeError):
"""This function call is invalid in the way you are using this client.
Due to the transition to using keystoneauth some function calls are no
longer available. You should make a similar call to the session object
instead.
"""
pass
def from_response(response, body, url, method=None):
"""
Return an instance of an ClientException or subclass
based on a requests response.
Usage::
resp, body = requests.request(...)
if resp.status_code != 200:
            raise from_response(resp, resp.text, url, method)
"""
cls = _code_map.get(response.status_code, ClientException)
kwargs = {
'code': response.status_code,
'method': method,
'url': url,
'request_id': None,
}
if response.headers:
kwargs['request_id'] = response.headers.get('x-compute-request-id')
if (issubclass(cls, RetryAfterException) and
'retry-after' in response.headers):
kwargs['retry_after'] = response.headers.get('retry-after')
if body:
message = "n/a"
details = "n/a"
if hasattr(body, 'keys'):
# NOTE(mriedem): WebOb<1.6.0 will return a nested dict structure
            # where the error key maps to the message/details/code. WebOb>=1.6.0
            # returns just a response body as a single dict, not nested,
            # so we have to handle both cases (since we can't trust what we're
            # given with content_type: application/json either way).
if 'message' in body:
# WebOb 1.6.0 case
message = body.get('message')
details = body.get('details')
else:
# WebOb<1.6.0 where we assume there is a single error message
# key to the body that has the message and details.
error = body[list(body)[0]]
message = error.get('message')
details = error.get('details')
kwargs['message'] = message
kwargs['details'] = details
return cls(**kwargs)
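# Sketch of the intended call site (names are illustrative): after an HTTP
# error, hand the response straight to from_response() and raise whatever
# subclass matches the status code:
#
#     resp = session.get(url)
#     if resp.status_code >= 400:
#         raise from_response(resp, resp.json(), url, method='GET')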
class ResourceNotFound(Exception):
"""Error in getting the resource."""
pass
| apache-2.0 |
QijunPan/ansible | lib/ansible/plugins/action/eos_config.py | 7 | 1247 | #
# Copyright 2015 Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action.net_config import ActionModule as NetworkActionModule
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(NetworkActionModule):
def run(self, tmp=None, task_vars=None):
display.vvvvv('Using connection plugin %s' % self._play_context.connection)
return NetworkActionModule.run(self, tmp, task_vars)
| gpl-3.0 |
ironbox360/django | tests/select_related_regress/models.py | 282 | 3677 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Building(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return "Building: %s" % self.name
@python_2_unicode_compatible
class Device(models.Model):
building = models.ForeignKey('Building', models.CASCADE)
name = models.CharField(max_length=10)
def __str__(self):
return "device '%s' in building %s" % (self.name, self.building)
@python_2_unicode_compatible
class Port(models.Model):
device = models.ForeignKey('Device', models.CASCADE)
port_number = models.CharField(max_length=10)
def __str__(self):
return "%s/%s" % (self.device.name, self.port_number)
@python_2_unicode_compatible
class Connection(models.Model):
start = models.ForeignKey(
Port,
models.CASCADE,
related_name='connection_start',
unique=True,
)
end = models.ForeignKey(
Port,
models.CASCADE,
related_name='connection_end',
unique=True,
)
def __str__(self):
return "%s to %s" % (self.start, self.end)
# Another non-tree hierarchy that exercises code paths similar to the above
# example, but in a slightly different configuration.
class TUser(models.Model):
name = models.CharField(max_length=200)
class Person(models.Model):
user = models.ForeignKey(TUser, models.CASCADE, unique=True)
class Organizer(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
class Student(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
class Class(models.Model):
org = models.ForeignKey(Organizer, models.CASCADE)
class Enrollment(models.Model):
std = models.ForeignKey(Student, models.CASCADE)
cls = models.ForeignKey(Class, models.CASCADE)
# Models for testing bug #8036.
class Country(models.Model):
name = models.CharField(max_length=50)
class State(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country, models.CASCADE)
class ClientStatus(models.Model):
name = models.CharField(max_length=50)
class Client(models.Model):
name = models.CharField(max_length=50)
state = models.ForeignKey(State, models.SET_NULL, null=True)
status = models.ForeignKey(ClientStatus, models.CASCADE)
class SpecialClient(Client):
value = models.IntegerField()
# Some model inheritance exercises
@python_2_unicode_compatible
class Parent(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class Child(Parent):
value = models.IntegerField()
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=10)
child = models.ForeignKey(Child, models.SET_NULL, null=True)
def __str__(self):
return self.name
# Models for testing bug #19870.
@python_2_unicode_compatible
class Fowl(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class Hen(Fowl):
pass
class Chick(Fowl):
mother = models.ForeignKey(Hen, models.CASCADE)
class Base(models.Model):
name = models.CharField(max_length=10)
lots_of_text = models.TextField()
class Meta:
abstract = True
class A(Base):
a_field = models.CharField(max_length=10)
class B(Base):
b_field = models.CharField(max_length=10)
class C(Base):
c_a = models.ForeignKey(A, models.CASCADE)
c_b = models.ForeignKey(B, models.CASCADE)
is_published = models.BooleanField(default=False)
| bsd-3-clause |
JackDanger/sentry | src/sentry/utils/versioning.py | 6 | 1883 | from __future__ import absolute_import
import six
from sentry.exceptions import InvalidConfiguration
from sentry.utils import warnings
class Version(tuple):
def __str__(self):
return '.'.join(map(six.binary_type, self))
def summarize(sequence, max=3):
items = sequence[:max]
remainder = len(sequence) - max
if remainder == 1:
items.append('and one other')
elif remainder > 1:
items.append('and %s others' % (remainder,))
return items
def make_upgrade_message(service, modality, version, hosts):
return '{service} {modality} be upgraded to {version} on {hosts}.'.format(
hosts=','.join(map(six.binary_type, summarize(hosts.keys(), 2))),
modality=modality,
service=service,
version=version,
)
def check_versions(service, versions, required, recommended=None):
"""
Check that hosts fulfill version requirements.
:param service: service label, such as ``Redis``
:param versions: mapping of host to ``Version``
:param required: lowest supported ``Version``. If any host does not fulfill
this requirement, an ``InvalidConfiguration`` exception is raised.
:param recommended: recommended version. If any host does not fulfill this
requirement, a ``PendingDeprecationWarning`` is raised.
"""
# x = (host, version)
must_upgrade = dict(filter(lambda x: required > x[1], versions.items()))
if must_upgrade:
raise InvalidConfiguration(make_upgrade_message(service, 'must', required, must_upgrade))
if recommended:
# x = (host, version)
should_upgrade = dict(filter(lambda x: recommended > x[1], versions.items()))
if should_upgrade:
warnings.warn(
make_upgrade_message(service, 'should', recommended, should_upgrade),
PendingDeprecationWarning,
)
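# Hedged usage sketch with made-up hosts and versions:
#
#     check_versions(
#         'Redis',
#         {'redis-1:6379': Version((2, 8, 9)),
#          'redis-2:6379': Version((2, 6, 0))},
#         required=Version((2, 8, 0)),
#         recommended=Version((3, 0, 0)),
#     )
#
# Here redis-2 fails the required check, so InvalidConfiguration is raised
# before the recommended check runs.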
| bsd-3-clause |
tildaslash/RatticWeb | cred/tests/test_ssh_key.py | 7 | 2827 | from django.test import TestCase
from django.core.files import File
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from ratticweb.tests.helper import TestData
from cred.models import Cred, Group
import os
here = os.path.abspath(os.path.dirname(__file__))
ssh_keys = os.path.join(here, "ssh_keys")
class CredSSHKeyTest(TestCase):
def setUp(self):
self.data = TestData()
def test_upload_cred(self):
# Load the edit form
resp = self.data.norm.get(
reverse('cred.views.edit', args=(self.data.cred.id, ))
)
self.assertEqual(resp.status_code, 200)
# Get the data from the form to submit
form = resp.context['form']
post = form.initial
del post['url']
del post['attachment']
# Open a test file and upload it
with open(os.path.join(ssh_keys, "1.pem"), 'r') as fp:
post['ssh_key'] = fp
resp = self.data.norm.post(
reverse('cred.views.edit', args=(self.data.cred.id, )),
post
)
self.assertEqual(resp.status_code, 302)
# Get a new copy of the cred from the DB
cred = Cred.objects.get(pk=self.data.cred.id)
# Check it matches the test file
with open(os.path.join(ssh_keys, "1.pem"), 'r') as fp:
self.assertEqual(fp.read(), cred.ssh_key.read())
def test_cred_fingerprint_url(self):
with open(os.path.join(ssh_keys, "1.pem")) as fle:
cred = Cred.objects.create(ssh_key=File(fle), group=self.data.cred.group)
cred.save()
resp = self.data.norm.get(reverse('cred.views.ssh_key_fingerprint', args=(cred.id, )))
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content, open(os.path.join(ssh_keys, "1.fingerprint")).read().strip())
def test_cred_fingerprint(self):
group = Group.objects.create(name="group")
with open(os.path.join(ssh_keys, "1.pem")) as fle:
cred = Cred.objects.create(ssh_key=File(fle), group=group)
cred.save()
self.assertEqual(cred.ssh_key_fingerprint(), open(os.path.join(ssh_keys, "1.fingerprint")).read().strip())
def test_cred_with_password_fingerprint(self):
group = Group.objects.create(name="group")
with open(os.path.join(ssh_keys, "2.pem")) as fle:
with open(os.path.join(ssh_keys, "2.password")) as pfle:
cred = Cred.objects.create(ssh_key=File(fle), group=group, password=pfle.read().strip())
cred.save()
self.assertEqual(cred.ssh_key_fingerprint(), open(os.path.join(ssh_keys, "2.fingerprint")).read().strip())
CredSSHKeyTest = override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',))(CredSSHKeyTest)
| gpl-2.0 |
kinow-io/kinow-python-sdk | kinow_client/models/employees.py | 1 | 3370 | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.41
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Employees(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, pagination=None, data=None):
"""
Employees - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'pagination': 'Pagination',
'data': 'list[Employee]'
}
self.attribute_map = {
'pagination': 'pagination',
'data': 'data'
}
self._pagination = pagination
self._data = data
@property
def pagination(self):
"""
Gets the pagination of this Employees.
:return: The pagination of this Employees.
:rtype: Pagination
"""
return self._pagination
@pagination.setter
def pagination(self, pagination):
"""
Sets the pagination of this Employees.
:param pagination: The pagination of this Employees.
:type: Pagination
"""
self._pagination = pagination
@property
def data(self):
"""
Gets the data of this Employees.
:return: The data of this Employees.
:rtype: list[Employee]
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this Employees.
:param data: The data of this Employees.
:type: list[Employee]
"""
self._data = data
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
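# Hedged usage sketch:
#
#     e = Employees(pagination=None, data=[])
#     e.to_dict()   # -> {'pagination': None, 'data': []}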
| apache-2.0 |
mhvk/numpy | numpy/distutils/misc_util.py | 5 | 86629 | import os
import re
import sys
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
import multiprocessing
import textwrap
import importlib.util
from threading import local as tlocal
import distutils
from distutils.errors import DistutilsError
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
# store all created temporary directories so they can be deleted on exit
_tmpdirs = []
def clean_up_temporary_directory():
if _tmpdirs is not None:
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
atexit.register(clean_up_temporary_directory)
from numpy.compat import npy_load_module
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
class InstallableLib:
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get number of parallel build jobs set by the --parallel command line
argument of setup.py
If the command did not receive a setting the environment variable
NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of
processors on the system, with a maximum of 8 (to prevent
overloading the system if there a lot of CPUs).
Returns
-------
out : int
number of parallel jobs that can be run
"""
from numpy.distutils.core import get_distribution
try:
cpu_count = len(os.sched_getaffinity(0))
except AttributeError:
cpu_count = multiprocessing.cpu_count()
cpu_count = min(cpu_count, 8)
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
getattr(dist.get_command_obj('build_ext'), 'parallel', None),
getattr(dist.get_command_obj('build_clib'), 'parallel', None))
if all(x is None for x in cmdattr):
return envjobs
else:
return max(x for x in cmdattr if x is not None)
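# e.g. running ``NPY_NUM_BUILD_JOBS=4 python setup.py build`` makes this
# return 4 when no --parallel flag overrides it.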
def quote_args(args):
    # don't use _nt_quote_args as it does not check whether
# args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
splitted = name.split('/')
return os.path.join(*splitted)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realpath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realpath(os.path.abspath(parent_path))
apath = os.path.realpath(os.path.abspath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/', os.path.sep)
return minrelpath(joined)
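# For example (POSIX path separator shown):
#
#     >>> njoin('a', '../b', 'c')
#     'b/c'
#     >>> njoin(['a', 'b'], 'c')
#     'a/b/c'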
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path, '_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
with open(config_file) as fid:
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
def sorted_glob(fileglob):
"""sorts output of python glob for https://bugs.python.org/issue30461
to allow extensions to have reproducible build results"""
return sorted(glob.glob(fileglob))
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = sorted_glob(n)
p2 = sorted_glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
    def colour_text(s, fg=None, bg=None, bold=False):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
#########################
def cyg2win32(path):
if sys.platform=='cygwin' and path.startswith('/cygdrive'):
path = path[10] + ':' + os.path.normcase(path[11:])
return path
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_version():
"Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
else:
msc_ver = None
return msc_ver
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
ver = msvc_runtime_major ()
if ver:
if ver < 140:
return "msvcr%i" % ver
else:
return "vcruntime%i" % ver
else:
return None
def msvc_runtime_major():
"Return major version of MSVC runtime coded like get_build_msvc_version"
major = {1300: 70, # MSVC 7.0
1310: 71, # MSVC 7.1
1400: 80, # MSVC 8
1500: 90, # MSVC 9 (aka 2008)
1600: 100, # MSVC 10 (aka 2010)
1900: 140, # MSVC 14 (aka 2015)
}.get(msvc_runtime_version(), None)
return major
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
with open(source, 'r') as f:
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
return modules
def is_string(s):
return isinstance(s, str)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except Exception:
return False
return True
def is_glob_pattern(s):
return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
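# For example:
#
#     >>> get_language(['a.c', 'b.f'])
#     'f77'
#     >>> get_language(['a.f90', 'b.f'])
#     'f90'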
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
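# For example (assuming mod.f90 exists on disk and defines a Fortran
# module, since _get_f90_modules() has to read the file to decide):
#
#     >>> filter_sources(['x.c', 'y.cpp', 'z.f', 'mod.f90'])
#     (['x.c'], ['y.cpp'], ['z.f'], ['mod.f90'])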
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def _commandline_dep_string(cc_args, extra_postargs, pp_opts):
"""
Return commandline representation used to determine if a file needs
to be recompiled
"""
cmdline = 'commandline: '
cmdline += ' '.join(cc_args)
cmdline += ' '.join(extra_postargs)
cmdline += ' '.join(pp_opts) + '\n'
return cmdline
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(), abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
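# Illustrative sketch (hypothetical directories relative to os.getcwd();
# anything under a top-level 'build' directory is rejected):
#     is_local_src_dir('src')        ->  True, if ./src exists
#     is_local_src_dir('build/src')  ->  False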
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149. For Python 3.2 this is implemented on
Linux, but not on OS X.
"""
confvars = distutils.sysconfig.get_config_vars()
# SO is deprecated in 3.3.1, use EXT_SUFFIX instead
so_ext = confvars.get('EXT_SUFFIX', None)
if so_ext is None:
so_ext = confvars.get('SO', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# fall back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
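# Illustrative sketch (actual values depend on the platform and on how
# the interpreter was built):
#     get_shared_lib_extension()                    ->  '.so' on Linux
#     get_shared_lib_extension(is_python_ext=True)  ->  e.g. '.cpython-39-x86_64-linux-gnu.so'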
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Data file does not exist:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
class Configuration:
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0, os.path.dirname(setup_py))
try:
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = npy_load_module('_'.join(n.split('.')),
setup_py,
('.py', 'U', 1))
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
if given, the path such that the subpackage is located at
subpackage_path / subpackage_name. If None, the subpackage is
assumed to be located at local path / subpackage_name.
standalone : bool
If True, the subpackage is added as a standalone package
(its configuration is built with parent_name=None).
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self, data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path : seq or str
Argument can be either
* 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths::
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat:
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
bar/
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. ``*``.txt -> parent/a.txt, parent/b.txt
#. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt
#. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
<package install directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
bar/
cat.dat
can.dat
where <package install directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage'
('C:\\Python2.4\\Lib\\site-packages\\mypackage') or
'/usr/lib/python2.4/site-packages/mypackage/mysubpackage'
('C:\\Python2.4\\Lib\\site-packages\\mypackage\\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
"""Add define macros to configuration
Add the given sequence of macro name and value duples to the beginning
of the define_macros list This list will be visible to all extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under the
<python-include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules and libraries and scripts and allows
path-names be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
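# Illustrative sketch (hypothetical layout: local_path contains a.c and
# b.c; paths() expands the glob pattern relative to local_path):
#     self.paths('*.c')  ->  ['<local_path>/a.c', '<local_path>/b.c']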
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if not 'depends' in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relatively to the current package path.
install_dir : str
Where to install the npy-pkg config file, relatively to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
``@prefix@`` variable refers to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
When cross-compiling with numpy distutils, it might be necessary to
use modified npy-pkg-config files. Using the default/generated files
will link with the host libraries (i.e. libnpymath.a). For
cross-compilation you of course need to link with target libraries,
while using the host Python installation.
You can copy out the numpy/core/lib/npy-pkg-config directory, add a
pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment
variable to point to the directory with the modified npy-pkg-config
files.
Example npymath.ini modified for cross-compilation::
[meta]
Name=npymath
Description=Portable, core math library implementing C99 standard
Version=0.1
[variables]
pkgname=numpy.core
pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
prefix=${pkgdir}
libdir=${prefix}/lib
includedir=${prefix}/include
[default]
Libs=-L${libdir} -lnpymath
Cflags=-I${includedir}
Requires=mlib
[msvc]
Libs=/LIBPATH:${libdir} npymath.lib
Cflags=/INCLUDE:${includedir}
Requires=mlib
"""
if subst_dict is None:
subst_dict = {}
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (because a simple Fortran 77
code was able to be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (because a simple Fortran
90 code was able to be compiled successfully)
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
try:
output = subprocess.check_output(['svnversion'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
m = re.match(rb'(?P<revision>\d+)', output)
if m:
return int(m.group('revision'))
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
with open(entries) as f:
fstr = f.read()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
return int(m.group('revision'))
else: # non-xml entries file --- check that it contains a 'dir' revision entry
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
return int(m.group('revision'))
return None
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
try:
output = subprocess.check_output(
['hg', 'identify', '--num'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
m = re.match(rb'(?P<revision>\d+)', output)
if m:
return int(m.group('revision'))
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
with open(branch_fn) as f:
revision0 = f.read().strip()
branch_map = {}
with open(branch_cache_fn, 'r') as f:
for line in f:
branch1, revision1 = line.split()[:2]
if revision1==revision0:
branch0 = branch1
try:
revision1 = int(revision1)
except ValueError:
continue
branch_map[branch1] = revision1
return branch_map.get(branch0)
return None
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
__version__.py, <packagename>_version.py, version.py, and
__svn_version__.py for string variables version, __version__, and
<packagename>_version, until a version number is found.
"""
version = getattr(self, 'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
info = ('.py', 'U', 1)
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = npy_load_module('_'.join(n.split('.')),
fn, info)
except ImportError as e:
self.warn(str(e))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
# Try a versioneer-style get_versions(), if the module provides one
try:
version = version_module.get_versions()['version']
except AttributeError:
pass
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
def make_svn_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__svn_version__.py file to the current package directory.
Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is
intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path, '__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__hg_version__.py file to the current package directory.
Generate package __hg_version__.py file from Mercurial revision,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
in an Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_numpy_include_dirs():
# numpy_include_dirs are set by numpy/core/setup.py, otherwise []
include_dirs = Configuration.numpy_include_dirs[:]
if not include_dirs:
import numpy
include_dirs = [ numpy.get_include() ]
# else running numpy/core/setup.py
return include_dirs
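# Illustrative sketch: outside of numpy's own build, numpy_include_dirs
# is empty, so this typically reduces to [numpy.get_include()], e.g.
# ['/.../site-packages/numpy/core/include'] (path is hypothetical).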
def get_npy_pkg_dir():
"""Return the path where to find the npy-pkg-config directory.
If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that
is returned. Otherwise, a path inside the location of the numpy module is
returned.
The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining
customized npy-pkg-config .ini files for the cross-compilation
environment, and using them when cross-compiling.
"""
d = os.environ.get('NPY_PKG_CONFIG_PATH')
if d is not None:
return d
spec = importlib.util.find_spec('numpy')
d = os.path.join(os.path.dirname(spec.origin),
'core', 'lib', 'npy-pkg-config')
return d
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_info
"""
from numpy.distutils.npy_pkg_config import read_config
if dirs:
dirs.append(get_npy_pkg_dir())
else:
dirs = [get_npy_pkg_dir()]
return read_config(pkgname, dirs)
def get_info(pkgname, dirs=None):
"""
Return an info dict for a given C library.
The info dict contains the necessary options to use the C library.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
info : dict
The dictionary with build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_pkg_info
Examples
--------
To get the necessary information for the npymath library from NumPy:
>>> npymath_info = np.distutils.misc_util.get_info('npymath')
>>> npymath_info #doctest: +SKIP
{'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
This info dict can then be used as input to a `Configuration` instance::
config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
"""
from numpy.distutils.npy_pkg_config import parse_flags
pkg_info = get_pkg_info(pkgname, dirs)
# Translate LibraryInfo instance into a build_info dict
info = parse_flags(pkg_info.cflags())
for k, v in parse_flags(pkg_info.libs()).items():
info[k].extend(v)
# add_extension's extra_info argument is strict about which keys it accepts
info['define_macros'] = info['macros']
del info['macros']
del info['ignored']
return info
def is_bootstrapping():
import builtins
try:
builtins.__NUMPY_SETUP__
return True
except AttributeError:
return False
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
"""Return a configuration dictionary for usage in
configuration() function defined in file setup_<name>.py.
"""
import warnings
warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
'deprecated default_config_dict(%r,%r,%r)'
% (name, parent_name, local_path,
name, parent_name, local_path,
), stacklevel=2)
c = Configuration(name, parent_name, local_path)
return c.todict()
def dict_append(d, **kws):
for k, v in kws.items():
if k in d:
ov = d[k]
if isinstance(ov, str):
d[k] = v
else:
d[k].extend(v)
else:
d[k] = v
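# Illustrative sketch: existing non-string values are extended in place,
# new keys are simply set:
#     d = {'libraries': ['m']}
#     dict_append(d, libraries=['npymath'], define_macros=[('A', 1)])
#     d  ->  {'libraries': ['m', 'npymath'], 'define_macros': [('A', 1)]}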
def appendpath(prefix, path):
if os.path.sep != '/':
prefix = prefix.replace('/', os.path.sep)
path = path.replace('/', os.path.sep)
drive = ''
if os.path.isabs(path):
drive = os.path.splitdrive(prefix)[0]
absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
pathdrive, path = os.path.splitdrive(path)
d = os.path.commonprefix([absprefix, path])
if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
or os.path.join(path[:len(d)], path[len(d):]) != path:
# Handle invalid paths
d = os.path.dirname(d)
subpath = path[len(d):]
if os.path.isabs(subpath):
subpath = subpath[1:]
else:
subpath = path
return os.path.normpath(njoin(drive + prefix, subpath))
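# Illustrative sketch (POSIX separators; the second case shows how the
# common prefix of an absolute path is folded under the prefix):
#     appendpath('prefix', 'name')          ->  'prefix/name'
#     appendpath('/usr/local', '/usr/lib')  ->  '/usr/local/lib'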
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
with open(target, 'w') as f:
f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
# For gfortran+msvc combination, extra shared libraries may exist
f.write(textwrap.dedent("""
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
if sys.version_info >= (3, 8):
os.add_dll_directory(extra_dll_dir)
else:
os.environ.setdefault('PATH', '')
os.environ['PATH'] += os.pathsep + extra_dll_dir
"""))
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(textwrap.dedent(r'''
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
"""
Show libraries in the system on which NumPy was built.
Print information about various resources (libraries, library
directories, include directories, etc.) in the system on which
NumPy was built.
See Also
--------
get_include : Returns the directory containing NumPy C
header files.
Notes
-----
Classes specifying the information to be printed are defined
in the `numpy.distutils.system_info` module.
Information may include:
* ``language``: language used to write the libraries (mostly
C or f77)
* ``libraries``: names of libraries found in the system
* ``library_dirs``: directories containing the libraries
* ``include_dirs``: directories containing library header files
* ``src_dirs``: directories containing library source files
* ``define_macros``: preprocessor macros used by
``distutils.setup``
* ``baseline``: minimum CPU features required
* ``found``: dispatched features supported in the system
* ``not found``: dispatched features that are not supported
in the system
Examples
--------
>>> import numpy as np
>>> np.show_config()
blas_opt_info:
language = c
define_macros = [('HAVE_CBLAS', None)]
libraries = ['openblas', 'openblas']
library_dirs = ['/usr/local/lib']
"""
from numpy.core._multiarray_umath import (
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
)
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
features_found, features_not_found = [], []
for feature in __cpu_dispatch__:
if __cpu_features__[feature]:
features_found.append(feature)
else:
features_not_found.append(feature)
print("Supported SIMD extensions in this NumPy install:")
print(" baseline = %s" % (','.join(__cpu_baseline__)))
print(" found = %s" % (','.join(features_found)))
print(" not found = %s" % (','.join(features_not_found)))
'''))
return target
def msvc_version(compiler):
"""Return version major and minor of compiler instance if it is
MSVC, raise an exception otherwise."""
if not compiler.compiler_type == "msvc":
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
def get_build_architecture():
# Importing distutils.msvccompiler triggers a warning on non-Windows
# systems, so delay the import to here.
from distutils.msvccompiler import get_build_architecture
return get_build_architecture()
| bsd-3-clause |
ChromiumWebApps/chromium | build/android/pylib/utils/test_options_parser.py | 54 | 3471 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses options for the instrumentation tests."""
import os
# TODO(gkanwar): Some downstream scripts currently rely on these functions
# existing. This dependency should be removed, and this file deleted, in the
# future.
def AddBuildTypeOption(option_parser):
"""Decorates OptionParser with build type option."""
default_build_type = 'Debug'
if 'BUILDTYPE' in os.environ:
default_build_type = os.environ['BUILDTYPE']
option_parser.add_option('--debug', action='store_const', const='Debug',
dest='build_type', default=default_build_type,
help='If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug')
option_parser.add_option('--release', action='store_const', const='Release',
dest='build_type',
help='If set, run test suites under out/Release. '
'Default is env var BUILDTYPE or Debug.')
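# Illustrative usage sketch (hypothetical optparse parser):
#     parser = optparse.OptionParser()
#     AddBuildTypeOption(parser)
#     options, _ = parser.parse_args(['--release'])
#     options.build_type  ->  'Release'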
def AddTestRunnerOptions(option_parser, default_timeout=60):
"""Decorates OptionParser with options applicable to all tests."""
option_parser.add_option('-t', dest='timeout',
help='Timeout to wait for each test',
type='int',
default=default_timeout)
option_parser.add_option('-c', dest='cleanup_test_files',
help='Cleanup test files on the device after run',
action='store_true')
option_parser.add_option('--num_retries', dest='num_retries', type='int',
default=2,
help='Number of retries for a test before '
'giving up.')
option_parser.add_option('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
profilers = ['devicestatsmonitor', 'chrometrace', 'dumpheap', 'smaps',
'traceview']
option_parser.add_option('--profiler', dest='profilers', action='append',
choices=profilers,
help='Profiling tool to run during test. '
'Pass multiple times to run multiple profilers. '
'Available profilers: %s' % profilers)
option_parser.add_option('--tool',
dest='tool',
help='Run the test under a tool '
'(use --tool help to list them)')
option_parser.add_option('--flakiness-dashboard-server',
dest='flakiness_dashboard_server',
help=('Address of the server that is hosting the '
'Chrome for Android flakiness dashboard.'))
option_parser.add_option('--skip-deps-push', dest='push_deps',
action='store_false', default=True,
help='Do not push dependencies to the device. '
'Use this at own risk for speeding up test '
'execution on local machine.')
AddBuildTypeOption(option_parser)
| bsd-3-clause |
endlessm/chromium-browser | tools/site_compare/commands/timeload.py | 189 | 4936 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SiteCompare command to time page loads
Loads a series of URLs in a series of browsers (and browser versions)
and measures how long the page takes to load in each. Outputs a
comma-delimited file. The first line is "URL,[browser names]"; each
additional line is a URL followed by comma-delimited times (in seconds),
or the string "timeout" or "crashed".
"""
import os # Functions for walking the directory tree
import tempfile # Get a temporary directory to hold intermediates
import command_line
import drivers # Functions for driving keyboard/mouse/windows, OS-specific
import operators # Functions that, given two bitmaps as input, produce
# output depending on the performance of an operation
import scrapers # Functions that know how to capture a render from
# particular browsers
def CreateCommand(cmdline):
"""Inserts the command and arguments into a command line for parsing."""
cmd = cmdline.AddCommand(
["timeload"],
"Measures how long a series of URLs takes to load in one or more browsers.",
None,
ExecuteTimeLoad)
cmd.AddArgument(
["-b", "--browsers"], "List of browsers to use. Comma-separated",
type="string", required=True)
cmd.AddArgument(
["-bp", "--browserpaths"], "List of paths to browsers. Comma-separated",
type="string", required=False)
cmd.AddArgument(
["-bv", "--browserversions"],
"List of versions of browsers. Comma-separated",
type="string", required=False)
cmd.AddArgument(
["-u", "--url"], "URL to time")
cmd.AddArgument(
["-l", "--list"], "List of URLs to time", type="readfile")
cmd.AddMutualExclusion(["--url", "--list"])
cmd.AddArgument(
["-s", "--startline"], "First line of URL list", type="int")
cmd.AddArgument(
["-e", "--endline"], "Last line of URL list (exclusive)", type="int")
cmd.AddArgument(
["-c", "--count"], "Number of lines of URL file to use", type="int")
cmd.AddDependency("--startline", "--list")
cmd.AddRequiredGroup(["--url", "--list"])
cmd.AddDependency("--endline", "--list")
cmd.AddDependency("--count", "--list")
cmd.AddMutualExclusion(["--count", "--endline"])
cmd.AddDependency("--count", "--startline")
cmd.AddArgument(
["-t", "--timeout"], "Amount of time (seconds) to wait for browser to "
"finish loading",
type="int", default=60)
cmd.AddArgument(
["-log", "--logfile"], "File to write output", type="string", required=True)
cmd.AddArgument(
["-sz", "--size"], "Browser window size", default=(800, 600), type="coords")
def ExecuteTimeLoad(command):
"""Executes the TimeLoad command."""
browsers = command["--browsers"].split(",")
num_browsers = len(browsers)
if command["--browserversions"]:
browser_versions = command["--browserversions"].split(",")
else:
browser_versions = [None] * num_browsers
if command["--browserpaths"]:
browser_paths = command["--browserpaths"].split(",")
else:
browser_paths = [None] * num_browsers
  if len(browser_versions) != num_browsers:
    raise ValueError(
        "--browserversions must be same length as --browsers")
  if len(browser_paths) != num_browsers:
    raise ValueError(
        "--browserpaths must be same length as --browsers")
  unknown = [b for b in browsers if b not in ["chrome", "ie", "firefox"]]
  if unknown:
    raise ValueError("unknown browsers: %r" % unknown)
scraper_list = []
for b in xrange(num_browsers):
version = browser_versions[b]
if not version: version = None
scraper = scrapers.GetScraper( (browsers[b], version) )
if not scraper:
raise ValueError("could not find scraper for (%r, %r)" %
(browsers[b], version))
scraper_list.append(scraper)
if command["--url"]:
url_list = [command["--url"]]
else:
startline = command["--startline"]
if command["--count"]:
endline = startline+command["--count"]
else:
endline = command["--endline"]
url_list = [url.strip() for url in
open(command["--list"], "r").readlines()[startline:endline]]
log_file = open(command["--logfile"], "w")
log_file.write("URL")
for b in xrange(num_browsers):
log_file.write(",%s" % browsers[b])
if browser_versions[b]: log_file.write(" %s" % browser_versions[b])
log_file.write("\n")
results = {}
for url in url_list:
results[url] = [None] * num_browsers
for b in xrange(num_browsers):
result = scraper_list[b].Time(url_list, command["--size"],
command["--timeout"],
path=browser_paths[b])
for (url, time) in result:
results[url][b] = time
# output the results
for url in url_list:
log_file.write(url)
for b in xrange(num_browsers):
log_file.write(",%r" % results[url][b])
| bsd-3-clause |
xuanzhui/SoochowOraWIFIPW | Python3/getFileSSORequests.py | 1 | 2330 | __author__ = 'xuanzhui'
# http://docs.python-requests.org/en/latest/user/quickstart/
import requests, re
def printDebugInfo(resp):
    print('response status code : ', resp.status_code)
    print('response cookies : ', resp.cookies)
    print('response headers : ', resp.headers)
    print('response content : ', resp.content)
def parseHiddenInputValues(ssopage):
data_params = {}
params_list = re.findall(b'input type="hidden" name="(.*?)" value="(.*?)"', ssopage)
for param in params_list:
data_params[param[0].decode()] = param[1].decode()
return data_params
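# A minimal sketch of what the helper above extracts, assuming a
# hypothetical page fragment (not a real Oracle SSO response):
#   parseHiddenInputValues(b'<input type="hidden" name="token" value="abc">')
#   -> {'token': 'abc'}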
debugFlag = True
targeturl = 'https://gmp.oracle.com/captcha/files/airespace_pwd_apac.txt'
if debugFlag:
print('-- step1 request target url --')
resp = requests.get(targeturl, allow_redirects=False)
if debugFlag:
printDebugInfo(resp)
if debugFlag:
print('-- step2 load redirected sso page --')
resp.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0'
resp = requests.get(resp.headers['location'], allow_redirects=False, headers=resp.headers)
ssopage = resp.content
if debugFlag:
printDebugInfo(resp)
if debugFlag:
print('-- step3 directly post login page --')
#resp.headers['User-Agent']='Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0'
resp = requests.post('https://login.oracle.com/mysso/signon.jsp',
data=parseHiddenInputValues(ssopage),
allow_redirects=False)
ssopage = resp.content
if debugFlag:
printDebugInfo(resp)
if debugFlag:
print('-- step4 post login page with username and password --')
data_params = parseHiddenInputValues(ssopage)
#TODO Set your own username and password
data_params['ssousername']='username'
data_params['password']='password'
#resp.headers['Host']='login.oracle.com'
#resp.headers['Accept-Encoding']='gzip, deflate'
#resp.headers['Content-Type']='application/x-www-form-urlencoded'
resp = requests.post('https://login.oracle.com/oam/server/sso/auth_cred_submit',
data=data_params,
allow_redirects=False)
if debugFlag:
printDebugInfo(resp)
if debugFlag:
print('-- step5 retrieve target content --')
resp = requests.get(resp.headers['location'])
if debugFlag:
printDebugInfo(resp)
print(resp.content) | apache-2.0 |
dongpinglai/my_tornado | tornado/test/util_test.py | 5 | 9781 | from io import StringIO
import re
import sys
import datetime
import unittest
import tornado.escape
from tornado.escape import utf8
from tornado.util import (
raise_exc_info,
Configurable,
exec_in,
ArgReplacer,
timedelta_to_seconds,
import_object,
re_unescape,
is_finalizing,
)
import typing
from typing import cast
if typing.TYPE_CHECKING:
from typing import Dict, Any # noqa: F401
class RaiseExcInfoTest(unittest.TestCase):
def test_two_arg_exception(self):
# This test would fail on python 3 if raise_exc_info were simply
# a three-argument raise statement, because TwoArgException
# doesn't have a "copy constructor"
class TwoArgException(Exception):
def __init__(self, a, b):
super().__init__()
self.a, self.b = a, b
try:
raise TwoArgException(1, 2)
except TwoArgException:
exc_info = sys.exc_info()
try:
raise_exc_info(exc_info)
self.fail("didn't get expected exception")
except TwoArgException as e:
self.assertIs(e, exc_info[1])
class TestConfigurable(Configurable):
@classmethod
def configurable_base(cls):
return TestConfigurable
@classmethod
def configurable_default(cls):
return TestConfig1
class TestConfig1(TestConfigurable):
def initialize(self, pos_arg=None, a=None):
self.a = a
self.pos_arg = pos_arg
class TestConfig2(TestConfigurable):
def initialize(self, pos_arg=None, b=None):
self.b = b
self.pos_arg = pos_arg
class TestConfig3(TestConfigurable):
# TestConfig3 is a configuration option that is itself configurable.
@classmethod
def configurable_base(cls):
return TestConfig3
@classmethod
def configurable_default(cls):
return TestConfig3A
class TestConfig3A(TestConfig3):
def initialize(self, a=None):
self.a = a
class TestConfig3B(TestConfig3):
def initialize(self, b=None):
self.b = b
class ConfigurableTest(unittest.TestCase):
def setUp(self):
self.saved = TestConfigurable._save_configuration()
self.saved3 = TestConfig3._save_configuration()
def tearDown(self):
TestConfigurable._restore_configuration(self.saved)
TestConfig3._restore_configuration(self.saved3)
def checkSubclasses(self):
# no matter how the class is configured, it should always be
# possible to instantiate the subclasses directly
self.assertIsInstance(TestConfig1(), TestConfig1)
self.assertIsInstance(TestConfig2(), TestConfig2)
obj = TestConfig1(a=1)
self.assertEqual(obj.a, 1)
obj2 = TestConfig2(b=2)
self.assertEqual(obj2.b, 2)
def test_default(self):
# In these tests we combine a typing.cast to satisfy mypy with
# a runtime type-assertion. Without the cast, mypy would only
# let us access attributes of the base class.
obj = cast(TestConfig1, TestConfigurable())
self.assertIsInstance(obj, TestConfig1)
self.assertIs(obj.a, None)
obj = cast(TestConfig1, TestConfigurable(a=1))
self.assertIsInstance(obj, TestConfig1)
self.assertEqual(obj.a, 1)
self.checkSubclasses()
def test_config_class(self):
TestConfigurable.configure(TestConfig2)
obj = cast(TestConfig2, TestConfigurable())
self.assertIsInstance(obj, TestConfig2)
self.assertIs(obj.b, None)
obj = cast(TestConfig2, TestConfigurable(b=2))
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 2)
self.checkSubclasses()
def test_config_str(self):
TestConfigurable.configure("tornado.test.util_test.TestConfig2")
obj = cast(TestConfig2, TestConfigurable())
self.assertIsInstance(obj, TestConfig2)
self.assertIs(obj.b, None)
obj = cast(TestConfig2, TestConfigurable(b=2))
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 2)
self.checkSubclasses()
def test_config_args(self):
TestConfigurable.configure(None, a=3)
obj = cast(TestConfig1, TestConfigurable())
self.assertIsInstance(obj, TestConfig1)
self.assertEqual(obj.a, 3)
obj = cast(TestConfig1, TestConfigurable(42, a=4))
self.assertIsInstance(obj, TestConfig1)
self.assertEqual(obj.a, 4)
self.assertEqual(obj.pos_arg, 42)
self.checkSubclasses()
# args bound in configure don't apply when using the subclass directly
obj = TestConfig1()
self.assertIs(obj.a, None)
def test_config_class_args(self):
TestConfigurable.configure(TestConfig2, b=5)
obj = cast(TestConfig2, TestConfigurable())
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 5)
obj = cast(TestConfig2, TestConfigurable(42, b=6))
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 6)
self.assertEqual(obj.pos_arg, 42)
self.checkSubclasses()
# args bound in configure don't apply when using the subclass directly
obj = TestConfig2()
self.assertIs(obj.b, None)
def test_config_multi_level(self):
TestConfigurable.configure(TestConfig3, a=1)
obj = cast(TestConfig3A, TestConfigurable())
self.assertIsInstance(obj, TestConfig3A)
self.assertEqual(obj.a, 1)
TestConfigurable.configure(TestConfig3)
TestConfig3.configure(TestConfig3B, b=2)
obj2 = cast(TestConfig3B, TestConfigurable())
self.assertIsInstance(obj2, TestConfig3B)
self.assertEqual(obj2.b, 2)
def test_config_inner_level(self):
# The inner level can be used even when the outer level
# doesn't point to it.
obj = TestConfig3()
self.assertIsInstance(obj, TestConfig3A)
TestConfig3.configure(TestConfig3B)
obj = TestConfig3()
self.assertIsInstance(obj, TestConfig3B)
# Configuring the base doesn't configure the inner.
obj2 = TestConfigurable()
self.assertIsInstance(obj2, TestConfig1)
TestConfigurable.configure(TestConfig2)
obj3 = TestConfigurable()
self.assertIsInstance(obj3, TestConfig2)
obj = TestConfig3()
self.assertIsInstance(obj, TestConfig3B)
class UnicodeLiteralTest(unittest.TestCase):
def test_unicode_escapes(self):
self.assertEqual(utf8(u"\u00e9"), b"\xc3\xa9")
class ExecInTest(unittest.TestCase):
# TODO(bdarnell): make a version of this test for one of the new
# future imports available in python 3.
@unittest.skip("no testable future imports")
def test_no_inherit_future(self):
# This file has from __future__ import print_function...
f = StringIO()
print("hello", file=f)
# ...but the template doesn't
exec_in('print >> f, "world"', dict(f=f))
self.assertEqual(f.getvalue(), "hello\nworld\n")
class ArgReplacerTest(unittest.TestCase):
def setUp(self):
def function(x, y, callback=None, z=None):
pass
self.replacer = ArgReplacer(function, "callback")
def test_omitted(self):
args = (1, 2)
kwargs = dict() # type: Dict[str, Any]
self.assertIs(self.replacer.get_old_value(args, kwargs), None)
self.assertEqual(
self.replacer.replace("new", args, kwargs),
(None, (1, 2), dict(callback="new")),
)
def test_position(self):
args = (1, 2, "old", 3)
kwargs = dict() # type: Dict[str, Any]
self.assertEqual(self.replacer.get_old_value(args, kwargs), "old")
self.assertEqual(
self.replacer.replace("new", args, kwargs),
("old", [1, 2, "new", 3], dict()),
)
def test_keyword(self):
args = (1,)
kwargs = dict(y=2, callback="old", z=3)
self.assertEqual(self.replacer.get_old_value(args, kwargs), "old")
self.assertEqual(
self.replacer.replace("new", args, kwargs),
("old", (1,), dict(y=2, callback="new", z=3)),
)
class TimedeltaToSecondsTest(unittest.TestCase):
def test_timedelta_to_seconds(self):
time_delta = datetime.timedelta(hours=1)
self.assertEqual(timedelta_to_seconds(time_delta), 3600.0)
class ImportObjectTest(unittest.TestCase):
def test_import_member(self):
self.assertIs(import_object("tornado.escape.utf8"), utf8)
def test_import_member_unicode(self):
self.assertIs(import_object(u"tornado.escape.utf8"), utf8)
def test_import_module(self):
self.assertIs(import_object("tornado.escape"), tornado.escape)
def test_import_module_unicode(self):
# The internal implementation of __import__ differs depending on
# whether the thing being imported is a module or not.
# This variant requires a byte string in python 2.
self.assertIs(import_object(u"tornado.escape"), tornado.escape)
class ReUnescapeTest(unittest.TestCase):
def test_re_unescape(self):
test_strings = ("/favicon.ico", "index.html", "Hello, World!", "!$@#%;")
for string in test_strings:
self.assertEqual(string, re_unescape(re.escape(string)))
def test_re_unescape_raises_error_on_invalid_input(self):
with self.assertRaises(ValueError):
re_unescape("\\d")
with self.assertRaises(ValueError):
re_unescape("\\b")
with self.assertRaises(ValueError):
re_unescape("\\Z")
class IsFinalizingTest(unittest.TestCase):
def test_basic(self):
self.assertFalse(is_finalizing())
| apache-2.0 |
uni2u/neutron | neutron/extensions/multiprovidernet.py | 18 | 4327 | # Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as qexception
from neutron.extensions import providernet as pnet
SEGMENTS = 'segments'
class SegmentsSetInConjunctionWithProviders(qexception.InvalidInput):
message = _("Segments and provider values cannot both be set.")
class SegmentsContainDuplicateEntry(qexception.InvalidInput):
message = _("Duplicate segment entry in request.")
def _convert_and_validate_segments(segments, valid_values=None):
for segment in segments:
segment.setdefault(pnet.NETWORK_TYPE, attr.ATTR_NOT_SPECIFIED)
segment.setdefault(pnet.PHYSICAL_NETWORK, attr.ATTR_NOT_SPECIFIED)
segmentation_id = segment.get(pnet.SEGMENTATION_ID)
if segmentation_id:
segment[pnet.SEGMENTATION_ID] = attr.convert_to_int(
segmentation_id)
else:
segment[pnet.SEGMENTATION_ID] = attr.ATTR_NOT_SPECIFIED
if len(segment.keys()) != 3:
msg = (_("Unrecognized attribute(s) '%s'") %
', '.join(set(segment.keys()) -
set([pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID])))
raise webob.exc.HTTPBadRequest(msg)
def check_duplicate_segments(segments, is_partial_func=None):
"""Helper function checking duplicate segments.
    If is_partial_func is specified and not None, then
    SegmentsContainDuplicateEntry is raised if two segments are identical and
    not partially defined (is_partial_func(segment) == False).
    Otherwise SegmentsContainDuplicateEntry is raised if two segments are
    identical.
"""
if is_partial_func is not None:
segments = [s for s in segments if not is_partial_func(s)]
fully_specifieds = [tuple(sorted(s.items())) for s in segments]
if len(set(fully_specifieds)) != len(fully_specifieds):
raise SegmentsContainDuplicateEntry()
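# A short sketch of the helper's behaviour on hypothetical segments:
#   check_duplicate_segments([{'a': 1}, {'a': 1}])   # raises
#   check_duplicate_segments([{'a': 1}, {'a': 2}])   # passes
#   check_duplicate_segments([{'a': 1}, {'a': 1}],
#                            is_partial_func=lambda s: True)
#   # passes: both segments count as partial, so neither is compared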
attr.validators['type:convert_segments'] = (
_convert_and_validate_segments)
EXTENDED_ATTRIBUTES_2_0 = {
'networks': {
SEGMENTS: {'allow_post': True, 'allow_put': True,
'validate': {'type:convert_segments': None},
'convert_list_to': attr.convert_kvp_list_to_dict,
'default': attr.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'is_visible': True},
}
}
class Multiprovidernet(extensions.ExtensionDescriptor):
"""Extension class supporting multiple provider networks.
This class is used by neutron's extension framework to make
metadata about the multiple provider network extension available to
clients. No new resources are defined by this extension. Instead,
the existing network resource's request and response messages are
extended with 'segments' attribute.
With admin rights, network dictionaries returned will also include
'segments' attribute.
"""
@classmethod
def get_name(cls):
return "Multi Provider Network"
@classmethod
def get_alias(cls):
return "multi-provider"
@classmethod
def get_description(cls):
return ("Expose mapping of virtual networks to multiple physical "
"networks")
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/multi-provider/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-06-27T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| apache-2.0 |
justathoughtor2/atomicApe | cygwin/lib/python2.7/site-packages/pylint/test/unittest_lint.py | 3 | 27410 | # Copyright (c) 2003-2014 LOGILAB S.A. (Paris, FRANCE).
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from contextlib import contextmanager
import sys
import os
import tempfile
from shutil import rmtree
from os import getcwd, chdir
from os.path import join, basename, dirname, isdir, abspath, sep
import unittest
import six
from six.moves import reload_module
from pylint import config, lint
from pylint.lint import PyLinter, Run, preprocess_options, \
ArgumentPreprocessingError
from pylint.utils import MSG_STATE_SCOPE_CONFIG, MSG_STATE_SCOPE_MODULE, MSG_STATE_CONFIDENCE, \
MessagesStore, PyLintASTWalker, MessageDefinition, FileState, \
build_message_def, tokenize_module, UnknownMessage
from pylint.testutils import TestReporter
from pylint.reporters import text, html
from pylint import checkers
from pylint.checkers.utils import check_messages
from pylint import interfaces
if os.name == 'java':
if os._name == 'nt':
HOME = 'USERPROFILE'
else:
HOME = 'HOME'
else:
if sys.platform == 'win32':
HOME = 'USERPROFILE'
else:
HOME = 'HOME'
@contextmanager
def fake_home():
folder = tempfile.mkdtemp('fake-home')
old_home = os.environ.get(HOME)
try:
os.environ[HOME] = folder
yield
finally:
os.environ.pop('PYLINTRC', '')
if old_home is None:
del os.environ[HOME]
else:
os.environ[HOME] = old_home
rmtree(folder, ignore_errors=True)
def remove(file):
try:
os.remove(file)
except OSError:
pass
HERE = abspath(dirname(__file__))
INPUTDIR = join(HERE, 'input')
@contextmanager
def tempdir():
"""Create a temp directory and change the current location to it.
This is supposed to be used with a *with* statement.
"""
tmp = tempfile.mkdtemp()
    # Get the real path of the tempfile, otherwise tests fail on Mac OS X
current_dir = getcwd()
chdir(tmp)
abs_tmp = abspath('.')
try:
yield abs_tmp
finally:
chdir(current_dir)
rmtree(abs_tmp)
def create_files(paths, chroot='.'):
"""Creates directories and files found in <path>.
:param paths: list of relative paths to files or directories
:param chroot: the root directory in which paths will be created
>>> from os.path import isdir, isfile
>>> isdir('/tmp/a')
False
>>> create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], '/tmp')
>>> isdir('/tmp/a')
True
>>> isdir('/tmp/a/b/c')
True
>>> isfile('/tmp/a/b/c/d/e.py')
True
>>> isfile('/tmp/a/b/foo.py')
True
"""
dirs, files = set(), set()
for path in paths:
path = join(chroot, path)
filename = basename(path)
# path is a directory path
if filename == '':
dirs.add(path)
# path is a filename path
else:
dirs.add(dirname(path))
files.add(path)
for dirpath in dirs:
if not isdir(dirpath):
os.makedirs(dirpath)
for filepath in files:
open(filepath, 'w').close()
class SysPathFixupTC(unittest.TestCase):
def setUp(self):
self.orig = list(sys.path)
self.fake = [1, 2, 3]
sys.path[:] = self.fake
def tearDown(self):
sys.path[:] = self.orig
def test_no_args(self):
with lint.fix_import_path([]):
self.assertEqual(sys.path, self.fake)
self.assertEqual(sys.path, self.fake)
def test_one_arg(self):
with tempdir() as chroot:
create_files(['a/b/__init__.py'])
expected = [join(chroot, 'a')] + self.fake
cases = (
['a/b/'],
['a/b'],
['a/b/__init__.py'],
['a/'],
['a'],
)
self.assertEqual(sys.path, self.fake)
for case in cases:
with lint.fix_import_path(case):
self.assertEqual(sys.path, expected)
self.assertEqual(sys.path, self.fake)
def test_two_similar_args(self):
with tempdir() as chroot:
create_files(['a/b/__init__.py', 'a/c/__init__.py'])
expected = [join(chroot, 'a')] + self.fake
cases = (
['a/b', 'a/c'],
['a/c/', 'a/b/'],
['a/b/__init__.py', 'a/c/__init__.py'],
['a', 'a/c/__init__.py'],
)
self.assertEqual(sys.path, self.fake)
for case in cases:
with lint.fix_import_path(case):
self.assertEqual(sys.path, expected)
self.assertEqual(sys.path, self.fake)
def test_more_args(self):
with tempdir() as chroot:
create_files(['a/b/c/__init__.py', 'a/d/__init__.py', 'a/e/f.py'])
expected = [
join(chroot, suffix)
for suffix in [sep.join(('a', 'b')), 'a', sep.join(('a', 'e'))]
] + self.fake
cases = (
['a/b/c/__init__.py', 'a/d/__init__.py', 'a/e/f.py'],
['a/b/c', 'a', 'a/e'],
['a/b/c', 'a', 'a/b/c', 'a/e', 'a'],
)
self.assertEqual(sys.path, self.fake)
for case in cases:
with lint.fix_import_path(case):
self.assertEqual(sys.path, expected)
self.assertEqual(sys.path, self.fake)
class PyLinterTC(unittest.TestCase):
def setUp(self):
self.linter = PyLinter()
self.linter.disable('I')
self.linter.config.persistent = 0
# register checkers
checkers.initialize(self.linter)
self.linter.set_reporter(TestReporter())
def init_linter(self):
linter = self.linter
linter.open()
linter.set_current_module('toto')
linter.file_state = FileState('toto')
return linter
def test_pylint_visit_method_taken_in_account(self):
class CustomChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = 'custom'
msgs = {'W9999': ('', 'custom', '')}
@check_messages('custom')
def visit_class(self, _):
pass
self.linter.register_checker(CustomChecker(self.linter))
self.linter.open()
out = six.moves.StringIO()
self.linter.set_reporter(text.TextReporter(out))
self.linter.check('abc')
def test_enable_message(self):
linter = self.init_linter()
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('W0102'))
linter.disable('W0101', scope='package')
linter.disable('W0102', scope='module', line=1)
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertFalse(linter.is_message_enabled('W0102', 1))
linter.set_current_module('tutu')
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('W0102'))
linter.enable('W0101', scope='package')
linter.enable('W0102', scope='module', line=1)
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('W0102', 1))
def test_enable_message_category(self):
linter = self.init_linter()
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('C0202'))
linter.disable('W', scope='package')
linter.disable('C', scope='module', line=1)
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('C0202'))
self.assertFalse(linter.is_message_enabled('C0202', line=1))
linter.set_current_module('tutu')
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('C0202'))
linter.enable('W', scope='package')
linter.enable('C', scope='module', line=1)
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('C0202'))
self.assertTrue(linter.is_message_enabled('C0202', line=1))
def test_message_state_scope(self):
class FakeConfig(object):
confidence = ['HIGH']
linter = self.init_linter()
linter.disable('C0202')
self.assertEqual(MSG_STATE_SCOPE_CONFIG,
linter.get_message_state_scope('C0202'))
linter.disable('W0101', scope='module', line=3)
self.assertEqual(MSG_STATE_SCOPE_CONFIG,
linter.get_message_state_scope('C0202'))
self.assertEqual(MSG_STATE_SCOPE_MODULE,
linter.get_message_state_scope('W0101', 3))
linter.enable('W0102', scope='module', line=3)
self.assertEqual(MSG_STATE_SCOPE_MODULE,
linter.get_message_state_scope('W0102', 3))
linter.config = FakeConfig()
self.assertEqual(
MSG_STATE_CONFIDENCE,
linter.get_message_state_scope('this-is-bad',
confidence=interfaces.INFERENCE))
def test_enable_message_block(self):
linter = self.init_linter()
linter.open()
filepath = join(INPUTDIR, 'func_block_disable_msg.py')
linter.set_current_module('func_block_disable_msg')
astroid = linter.get_ast(filepath, 'func_block_disable_msg')
linter.process_tokens(tokenize_module(astroid))
fs = linter.file_state
fs.collect_block_lines(linter.msgs_store, astroid)
# global (module level)
self.assertTrue(linter.is_message_enabled('W0613'))
self.assertTrue(linter.is_message_enabled('E1101'))
# meth1
self.assertTrue(linter.is_message_enabled('W0613', 13))
# meth2
self.assertFalse(linter.is_message_enabled('W0613', 18))
# meth3
self.assertFalse(linter.is_message_enabled('E1101', 24))
self.assertTrue(linter.is_message_enabled('E1101', 26))
# meth4
self.assertFalse(linter.is_message_enabled('E1101', 32))
self.assertTrue(linter.is_message_enabled('E1101', 36))
# meth5
self.assertFalse(linter.is_message_enabled('E1101', 42))
self.assertFalse(linter.is_message_enabled('E1101', 43))
self.assertTrue(linter.is_message_enabled('E1101', 46))
self.assertFalse(linter.is_message_enabled('E1101', 49))
self.assertFalse(linter.is_message_enabled('E1101', 51))
# meth6
self.assertFalse(linter.is_message_enabled('E1101', 57))
self.assertTrue(linter.is_message_enabled('E1101', 61))
self.assertFalse(linter.is_message_enabled('E1101', 64))
self.assertFalse(linter.is_message_enabled('E1101', 66))
self.assertTrue(linter.is_message_enabled('E0602', 57))
self.assertTrue(linter.is_message_enabled('E0602', 61))
self.assertFalse(linter.is_message_enabled('E0602', 62))
self.assertTrue(linter.is_message_enabled('E0602', 64))
self.assertTrue(linter.is_message_enabled('E0602', 66))
# meth7
self.assertFalse(linter.is_message_enabled('E1101', 70))
self.assertTrue(linter.is_message_enabled('E1101', 72))
self.assertTrue(linter.is_message_enabled('E1101', 75))
self.assertTrue(linter.is_message_enabled('E1101', 77))
fs = linter.file_state
self.assertEqual(17, fs._suppression_mapping['W0613', 18])
self.assertEqual(30, fs._suppression_mapping['E1101', 33])
self.assertTrue(('E1101', 46) not in fs._suppression_mapping)
self.assertEqual(1, fs._suppression_mapping['C0302', 18])
self.assertEqual(1, fs._suppression_mapping['C0302', 50])
# This is tricky. While the disable in line 106 is disabling
# both 108 and 110, this is usually not what the user wanted.
# Therefore, we report the closest previous disable comment.
self.assertEqual(106, fs._suppression_mapping['E1101', 108])
self.assertEqual(109, fs._suppression_mapping['E1101', 110])
def test_enable_by_symbol(self):
"""messages can be controlled by symbolic names.
The state is consistent across symbols and numbers.
"""
linter = self.init_linter()
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('unreachable'))
self.assertTrue(linter.is_message_enabled('W0102'))
self.assertTrue(linter.is_message_enabled('dangerous-default-value'))
linter.disable('unreachable', scope='package')
linter.disable('dangerous-default-value', scope='module', line=1)
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertFalse(linter.is_message_enabled('unreachable'))
self.assertFalse(linter.is_message_enabled('W0102', 1))
self.assertFalse(linter.is_message_enabled('dangerous-default-value', 1))
linter.set_current_module('tutu')
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertFalse(linter.is_message_enabled('unreachable'))
self.assertTrue(linter.is_message_enabled('W0102'))
self.assertTrue(linter.is_message_enabled('dangerous-default-value'))
linter.enable('unreachable', scope='package')
linter.enable('dangerous-default-value', scope='module', line=1)
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('unreachable'))
self.assertTrue(linter.is_message_enabled('W0102', 1))
self.assertTrue(linter.is_message_enabled('dangerous-default-value', 1))
def test_lint_ext_module_with_file_output(self):
self.linter.set_reporter(text.TextReporter())
if sys.version_info < (3, 0):
strio = 'StringIO'
else:
strio = 'io'
self.linter.config.files_output = True
pylint_strio = 'pylint_%s.txt' % strio
files = [pylint_strio, 'pylint_global.txt']
for file in files:
self.addCleanup(remove, file)
self.linter.check(strio)
self.linter.generate_reports()
for f in files:
self.assertTrue(os.path.exists(f))
def test_enable_report(self):
self.assertEqual(self.linter.report_is_enabled('RP0001'), True)
self.linter.disable('RP0001')
self.assertEqual(self.linter.report_is_enabled('RP0001'), False)
self.linter.enable('RP0001')
self.assertEqual(self.linter.report_is_enabled('RP0001'), True)
def test_report_output_format_aliased(self):
text.register(self.linter)
self.linter.set_option('output-format', 'text')
self.assertEqual(self.linter.reporter.__class__.__name__, 'TextReporter')
def test_report_output_format_custom(self):
this_module = sys.modules[__name__]
class TestReporter(object):
pass
this_module.TestReporter = TestReporter
class_name = ".".join((this_module.__name__, 'TestReporter'))
self.linter.set_option('output-format', class_name)
self.assertEqual(self.linter.reporter.__class__.__name__, 'TestReporter')
def test_set_option_1(self):
linter = self.linter
linter.set_option('disable', 'C0111,W0234')
self.assertFalse(linter.is_message_enabled('C0111'))
self.assertFalse(linter.is_message_enabled('W0234'))
self.assertTrue(linter.is_message_enabled('W0113'))
self.assertFalse(linter.is_message_enabled('missing-docstring'))
self.assertFalse(linter.is_message_enabled('non-iterator-returned'))
def test_set_option_2(self):
linter = self.linter
        linter.set_option('disable', ('C0111', 'W0234'))
self.assertFalse(linter.is_message_enabled('C0111'))
self.assertFalse(linter.is_message_enabled('W0234'))
self.assertTrue(linter.is_message_enabled('W0113'))
self.assertFalse(linter.is_message_enabled('missing-docstring'))
self.assertFalse(linter.is_message_enabled('non-iterator-returned'))
def test_enable_checkers(self):
self.linter.disable('design')
self.assertFalse('design' in [c.name for c in self.linter.prepare_checkers()])
self.linter.enable('design')
self.assertTrue('design' in [c.name for c in self.linter.prepare_checkers()])
def test_errors_only(self):
linter = self.linter
self.linter.error_mode()
checkers = self.linter.prepare_checkers()
checker_names = set(c.name for c in checkers)
should_not = set(('design', 'format', 'metrics',
'miscellaneous', 'similarities'))
self.assertSetEqual(set(), should_not & checker_names)
def test_disable_similar(self):
self.linter.set_option('disable', 'RP0801')
self.linter.set_option('disable', 'R0801')
self.assertFalse('similarities' in [c.name for c in self.linter.prepare_checkers()])
def test_disable_alot(self):
"""check that we disabled a lot of checkers"""
self.linter.set_option('reports', False)
self.linter.set_option('disable', 'R,C,W')
checker_names = [c.name for c in self.linter.prepare_checkers()]
for cname in ('design', 'metrics', 'similarities'):
self.assertFalse(cname in checker_names, cname)
def test_addmessage(self):
self.linter.set_reporter(TestReporter())
self.linter.open()
self.linter.set_current_module('0123')
self.linter.add_message('C0301', line=1, args=(1, 2))
self.linter.add_message('line-too-long', line=2, args=(3, 4))
self.assertEqual(
['C: 1: Line too long (1/2)', 'C: 2: Line too long (3/4)'],
self.linter.reporter.messages)
def test_init_hooks_called_before_load_plugins(self):
self.assertRaises(RuntimeError,
Run, ['--load-plugins', 'unexistant', '--init-hook', 'raise RuntimeError'])
self.assertRaises(RuntimeError,
Run, ['--init-hook', 'raise RuntimeError', '--load-plugins', 'unexistant'])
def test_analyze_explicit_script(self):
self.linter.set_reporter(TestReporter())
self.linter.check(os.path.join(os.path.dirname(__file__), 'data', 'ascript'))
self.assertEqual(
['C: 2: Line too long (175/100)'],
self.linter.reporter.messages)
def test_html_reporter_missing_files(self):
output = six.StringIO()
self.linter.set_reporter(html.HTMLReporter(output))
self.linter.set_option('output-format', 'html')
self.linter.check('troppoptop.py')
self.linter.generate_reports()
value = output.getvalue()
self.assertIn('troppoptop.py', value)
self.assertIn('fatal', value)
def test_python3_checker_disabled(self):
checker_names = [c.name for c in self.linter.prepare_checkers()]
self.assertNotIn('python3', checker_names)
self.linter.set_option('enable', 'python3')
checker_names = [c.name for c in self.linter.prepare_checkers()]
self.assertIn('python3', checker_names)
class ConfigTC(unittest.TestCase):
def setUp(self):
os.environ.pop('PYLINTRC', None)
def test_pylint_home(self):
uhome = os.path.expanduser('~')
if uhome == '~':
expected = '.pylint.d'
else:
expected = os.path.join(uhome, '.pylint.d')
self.assertEqual(config.PYLINT_HOME, expected)
try:
pylintd = join(tempfile.gettempdir(), '.pylint.d')
os.environ['PYLINTHOME'] = pylintd
try:
reload_module(config)
self.assertEqual(config.PYLINT_HOME, pylintd)
finally:
try:
os.remove(pylintd)
except:
pass
finally:
del os.environ['PYLINTHOME']
def test_pylintrc(self):
with fake_home():
try:
self.assertEqual(config.find_pylintrc(), None)
os.environ['PYLINTRC'] = join(tempfile.gettempdir(),
'.pylintrc')
self.assertEqual(config.find_pylintrc(), None)
os.environ['PYLINTRC'] = '.'
self.assertEqual(config.find_pylintrc(), None)
finally:
reload_module(config)
def test_pylintrc_parentdir(self):
with tempdir() as chroot:
create_files(['a/pylintrc', 'a/b/__init__.py', 'a/b/pylintrc',
'a/b/c/__init__.py', 'a/b/c/d/__init__.py',
'a/b/c/d/e/.pylintrc'])
with fake_home():
self.assertEqual(config.find_pylintrc(), None)
results = {'a' : join(chroot, 'a', 'pylintrc'),
'a/b' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c/d' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c/d/e' : join(chroot, 'a', 'b', 'c', 'd', 'e', '.pylintrc'),
}
for basedir, expected in results.items():
os.chdir(join(chroot, basedir))
self.assertEqual(config.find_pylintrc(), expected)
def test_pylintrc_parentdir_no_package(self):
with tempdir() as chroot:
with fake_home():
create_files(['a/pylintrc', 'a/b/pylintrc', 'a/b/c/d/__init__.py'])
self.assertEqual(config.find_pylintrc(), None)
results = {'a' : join(chroot, 'a', 'pylintrc'),
'a/b' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c' : None,
'a/b/c/d' : None,
}
for basedir, expected in results.items():
os.chdir(join(chroot, basedir))
self.assertEqual(config.find_pylintrc(), expected)
class PreprocessOptionsTC(unittest.TestCase):
def _callback(self, name, value):
self.args.append((name, value))
def test_value_equal(self):
self.args = []
preprocess_options(['--foo', '--bar=baz', '--qu=ux'],
{'foo' : (self._callback, False),
'qu' : (self._callback, True)})
self.assertEqual(
[('foo', None), ('qu', 'ux')], self.args)
def test_value_space(self):
self.args = []
preprocess_options(['--qu', 'ux'],
{'qu' : (self._callback, True)})
self.assertEqual(
[('qu', 'ux')], self.args)
def test_error_missing_expected_value(self):
self.assertRaises(
ArgumentPreprocessingError,
preprocess_options,
['--foo', '--bar', '--qu=ux'],
{'bar' : (None, True)})
self.assertRaises(
ArgumentPreprocessingError,
preprocess_options,
['--foo', '--bar'],
{'bar' : (None, True)})
def test_error_unexpected_value(self):
self.assertRaises(
ArgumentPreprocessingError,
preprocess_options,
['--foo', '--bar=spam', '--qu=ux'],
{'bar' : (None, False)})
class MessagesStoreTC(unittest.TestCase):
def setUp(self):
self.store = MessagesStore()
class Checker(object):
name = 'achecker'
msgs = {
'W1234': ('message', 'msg-symbol', 'msg description.',
{'old_names': [('W0001', 'old-symbol')]}),
'E1234': ('Duplicate keyword argument %r in %s call',
'duplicate-keyword-arg',
'Used when a function call passes the same keyword argument multiple times.',
{'maxversion': (2, 6)}),
}
self.store.register_messages(Checker())
def _compare_messages(self, desc, msg, checkerref=False):
self.assertMultiLineEqual(desc, msg.format_help(checkerref=checkerref))
def test_check_message_id(self):
self.assertIsInstance(self.store.check_message_id('W1234'),
MessageDefinition)
self.assertRaises(UnknownMessage,
self.store.check_message_id, 'YB12')
def test_message_help(self):
msg = self.store.check_message_id('W1234')
self._compare_messages(
''':msg-symbol (W1234): *message*
msg description. This message belongs to the achecker checker.''',
msg, checkerref=True)
self._compare_messages(
''':msg-symbol (W1234): *message*
msg description.''',
msg, checkerref=False)
def test_message_help_minmax(self):
        # build the message manually to be Python version independent
msg = self.store.check_message_id('E1234')
self._compare_messages(
''':duplicate-keyword-arg (E1234): *Duplicate keyword argument %r in %s call*
Used when a function call passes the same keyword argument multiple times.
This message belongs to the achecker checker. It can't be emitted when using
Python >= 2.6.''',
msg, checkerref=True)
self._compare_messages(
''':duplicate-keyword-arg (E1234): *Duplicate keyword argument %r in %s call*
Used when a function call passes the same keyword argument multiple times.
This message can't be emitted when using Python >= 2.6.''',
msg, checkerref=False)
def test_list_messages(self):
sys.stdout = six.StringIO()
try:
self.store.list_messages()
output = sys.stdout.getvalue()
finally:
sys.stdout = sys.__stdout__
# cursory examination of the output: we're mostly testing it completes
self.assertIn(':msg-symbol (W1234): *message*', output)
def test_add_renamed_message(self):
self.store.add_renamed_message('W1234', 'old-bad-name', 'msg-symbol')
self.assertEqual('msg-symbol',
self.store.check_message_id('W1234').symbol)
self.assertEqual('msg-symbol',
self.store.check_message_id('old-bad-name').symbol)
def test_renamed_message_register(self):
self.assertEqual('msg-symbol',
self.store.check_message_id('W0001').symbol)
self.assertEqual('msg-symbol',
self.store.check_message_id('old-symbol').symbol)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
teamblueridge/gerrit | tools/gitlog2asciidoc.py | 22 | 3306 | #!/usr/bin/python
from optparse import OptionParser
import re
import subprocess
import sys
"""
This script generates a release note from the output of git log
between the specified tags.
Options:
--issues Only output the commits with associated issues.
--issue-numbers Only output the issue numbers of the commits with
associated issues.
Arguments:
since -- tag name
until -- tag name
Example Input:
* <commit subject>
+
<commit message>
Bug: issue 123
Change-Id: <change id>
Signed-off-by: <name>
Expected Output:
* issue 123 <commit subject>
+
<commit message>
"""
parser = OptionParser(usage='usage: %prog [options] <since> <until>')
parser.add_option('-i', '--issues', action='store_true',
dest='issues_only', default=False,
                  help='only output the commits with associated issues')
parser.add_option('-n', '--issue-numbers', action='store_true',
dest='issue_numbers_only', default=False,
                  help='only output the issue numbers of the commits with \
                  associated issues')
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("wrong number of arguments")
issues_only = options.issues_only
issue_numbers_only = options.issue_numbers_only
since_until = args[0] + '..' + args[1]
proc = subprocess.Popen(['git', 'log', '--reverse', '--no-merges',
since_until, "--format=* %s%n+%n%b"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,)
stdout_value = proc.communicate()[0]
subject = ""
message = []
is_issue = False
# regex pattern to match following cases such as Bug: 123, Issue Bug: 123,
# Bug: GERRIT-123, Bug: issue 123, Bug issue: 123, issue: 123, issue: bug 123
p = re.compile('bug: GERRIT-|bug(:? issue)?:? |issue(:? bug)?:? ',
re.IGNORECASE)
if issue_numbers_only:
for line in stdout_value.splitlines(True):
if p.match(line):
sys.stdout.write(p.sub('', line))
else:
for line in stdout_value.splitlines(True):
# Move issue number to subject line
if p.match(line):
line = p.sub('issue ', line).replace('\n',' ')
subject = subject[:2] + line + subject[2:]
is_issue = True
elif line.startswith('* '):
# Write change log for a commit
if subject != "":
if (not issues_only or is_issue):
# Write subject
sys.stdout.write(subject)
# Write message lines
if message != []:
# Clear + from last line in commit message
message[-1] = '\n'
for m in message:
sys.stdout.write(m)
# Start new commit block
message = []
subject = line
is_issue = False
# Remove commit footers
elif re.match(r'((\w+-)+\w+:)', line):
continue
# Don't add extra blank line if last one is already blank
elif line == '\n' and message and message[-1] != '+\n':
message.append('+\n')
elif line != '\n':
message.append(line)
| apache-2.0 |
prospwro/odoo | addons/l10n_multilang/__openerp__.py | 339 | 1670 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multi Language Chart of Accounts',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Hidden/Dependency',
'description': """
* Multi language support for Chart of Accounts, Taxes, Tax Codes, Journals,
Accounting Templates, Analytic Chart of Accounts and Analytic Journals.
* Setup wizard changes
- Copy translations for COA, Tax, Tax Code and Fiscal Position from
templates to target objects.
""",
'website': 'http://www.openerp.com',
'depends' : ['account'],
'data': [],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
afrolov1/nova | nova/tests/api/openstack/compute/test_versions.py | 1 | 27566 | # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid as stdlib_uuid
import feedparser
from lxml import etree
import webob
from nova.api.openstack.compute import versions
from nova.api.openstack.compute import views
from nova.api.openstack import xmlutil
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
from nova.tests import matchers
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0'
}
EXP_LINKS = {
'v2.0': {
'html': 'http://docs.openstack.org/',
},
}
EXP_VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml;version=2",
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2",
},
],
},
"v3.0": {
"id": "v3.0",
"status": "EXPERIMENTAL",
"updated": "2013-07-23T11:33:21Z",
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=3",
}
],
}
}
class VersionsTest(test.NoDBTestCase):
def test_get_version_list(self):
req = webob.Request.blank('/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
versions = jsonutils.loads(res.body)["versions"]
expected = [
{
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
}],
},
{
"id": "v3.0",
"status": "EXPERIMENTAL",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v3/",
}],
},
]
self.assertEqual(versions, expected)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v2/')
self.assertEqual(res.location, redirect_req.url)
def test_get_version_2_detail(self):
req = webob.Request.blank('/v2/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {
"version": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/"
"vnd.openstack.compute+xml;version=2",
},
{
"base": "application/json",
"type": "application/"
"vnd.openstack.compute+json;version=2",
},
],
},
}
self.assertEqual(expected, version)
def test_get_version_2_detail_content_type(self):
req = webob.Request.blank('/')
req.accept = "application/json;version=2"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {
"version": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/"
"vnd.openstack.compute+xml;version=2",
},
{
"base": "application/json",
"type": "application/"
"vnd.openstack.compute+json;version=2",
},
],
},
}
self.assertEqual(expected, version)
def test_get_version_2_detail_xml(self):
req = webob.Request.blank('/v2/')
req.accept = "application/xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/xml")
version = etree.XML(res.body)
xmlutil.validate_schema(version, 'version')
expected = EXP_VERSIONS['v2.0']
self.assertTrue(version.xpath('/ns:version', namespaces=NS))
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
self.assertTrue(common.compare_media_types(media_types,
expected['media-types']))
for key in ['id', 'status', 'updated']:
self.assertEqual(version.get(key), expected[key])
links = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(links,
[{'rel': 'self', 'href': 'http://localhost/v2/'}]
+ expected['links']))
def test_get_version_list_xml(self):
req = webob.Request.blank('/')
req.accept = "application/xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/xml")
root = etree.XML(res.body)
xmlutil.validate_schema(root, 'versions')
self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
versions = root.xpath('ns:version', namespaces=NS)
self.assertEqual(len(versions), 2)
for i, v in enumerate(['v2.0', 'v3.0']):
version = versions[i]
expected = EXP_VERSIONS[v]
for key in ['id', 'status', 'updated']:
self.assertEqual(version.get(key), expected[key])
(link,) = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(link,
[{'rel': 'self', 'href': 'http://localhost/%s/' % v}]))
def test_get_version_2_detail_atom(self):
req = webob.Request.blank('/v2/')
req.accept = "application/atom+xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual("application/atom+xml", res.content_type)
xmlutil.validate_schema(etree.XML(res.body), 'atom')
f = feedparser.parse(res.body)
self.assertEqual(f.feed.title, 'About This Version')
self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
self.assertEqual(f.feed.id, 'http://localhost/v2/')
self.assertEqual(f.feed.author, 'Rackspace')
self.assertEqual(f.feed.author_detail.href,
'http://www.rackspace.com/')
self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(f.feed.links[0]['rel'], 'self')
self.assertEqual(len(f.entries), 1)
entry = f.entries[0]
self.assertEqual(entry.id, 'http://localhost/v2/')
self.assertEqual(entry.title, 'Version v2.0')
self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
self.assertEqual(len(entry.content), 1)
self.assertEqual(entry.content[0].value,
'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
self.assertEqual(len(entry.links), 2)
self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(entry.links[0]['rel'], 'self')
self.assertEqual(entry.links[1], {
'href': EXP_LINKS['v2.0']['html'],
'type': 'text/html',
'rel': 'describedby'})
def test_get_version_list_atom(self):
req = webob.Request.blank('/')
req.accept = "application/atom+xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/atom+xml")
f = feedparser.parse(res.body)
self.assertEqual(f.feed.title, 'Available API Versions')
self.assertEqual(f.feed.updated, '2013-07-23T11:33:21Z')
self.assertEqual(f.feed.id, 'http://localhost/')
self.assertEqual(f.feed.author, 'Rackspace')
self.assertEqual(f.feed.author_detail.href,
'http://www.rackspace.com/')
self.assertEqual(f.feed.links[0]['href'], 'http://localhost/')
self.assertEqual(f.feed.links[0]['rel'], 'self')
self.assertEqual(len(f.entries), 2)
entry = f.entries[0]
self.assertEqual(entry.id, 'http://localhost/v2/')
self.assertEqual(entry.title, 'Version v2.0')
self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
self.assertEqual(len(entry.content), 1)
self.assertEqual(entry.content[0].value,
'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
self.assertEqual(len(entry.links), 1)
self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(entry.links[0]['rel'], 'self')
entry = f.entries[1]
self.assertEqual(entry.id, 'http://localhost/v3/')
self.assertEqual(entry.title, 'Version v3.0')
self.assertEqual(entry.updated, '2013-07-23T11:33:21Z')
self.assertEqual(len(entry.content), 1)
self.assertEqual(entry.content[0].value,
'Version v3.0 EXPERIMENTAL (2013-07-23T11:33:21Z)')
self.assertEqual(len(entry.links), 1)
self.assertEqual(entry.links[0]['href'], 'http://localhost/v3/')
self.assertEqual(entry.links[0]['rel'], 'self')
def test_multi_choice_image(self):
req = webob.Request.blank('/images/1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v3.0",
"status": "EXPERIMENTAL",
"links": [
{
"href": "http://localhost/v3/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=3",
}
],
},
{
"id": "v2.0",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml"
";version=2"
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
def test_multi_choice_image_xml(self):
req = webob.Request.blank('/images/1')
req.accept = "application/xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/xml")
root = etree.XML(res.body)
self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
versions = root.xpath('ns:version', namespaces=NS)
self.assertEqual(len(versions), 2)
version = versions[1]
self.assertEqual(version.get('id'), 'v2.0')
self.assertEqual(version.get('status'), 'CURRENT')
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
self.assertTrue(common.
compare_media_types(media_types,
EXP_VERSIONS['v2.0']['media-types']
))
links = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(links,
[{'rel': 'self', 'href': 'http://localhost/v2/images/1'}]))
version = versions[0]
self.assertEqual(version.get('id'), 'v3.0')
self.assertEqual(version.get('status'), 'EXPERIMENTAL')
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
self.assertTrue(common.
compare_media_types(media_types,
EXP_VERSIONS['v3.0']['media-types']
))
links = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(links,
[{'rel': 'self', 'href': 'http://localhost/v3/images/1'}]))
def test_multi_choice_server_atom(self):
"""Make sure multi choice responses do not have content-type
application/atom+xml (should use default of json)
"""
req = webob.Request.blank('/servers')
req.accept = "application/atom+xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
def test_multi_choice_server(self):
uuid = str(stdlib_uuid.uuid4())
req = webob.Request.blank('/servers/' + uuid)
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v3.0",
"status": "EXPERIMENTAL",
"links": [
{
"href": "http://localhost/v3/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=3",
}
],
},
{
"id": "v2.0",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml"
";version=2"
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
class VersionsViewBuilderTests(test.NoDBTestCase):
def test_view_builder(self):
base_url = "http://example.org/"
version_data = {
"v3.2.1": {
"id": "3.2.1",
"status": "CURRENT",
"updated": "2011-07-18T11:30:00Z",
}
}
expected = {
"versions": [
{
"id": "3.2.1",
"status": "CURRENT",
"updated": "2011-07-18T11:30:00Z",
"links": [
{
"rel": "self",
"href": "http://example.org/v2/",
},
],
}
]
}
builder = views.versions.ViewBuilder(base_url)
output = builder.build_versions(version_data)
self.assertEqual(output, expected)
def test_generate_href(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2')
self.assertEqual(actual, expected)
def test_generate_href_v3(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v3/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v3.0')
self.assertEqual(actual, expected)
def test_generate_href_unknown(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('foo')
self.assertEqual(actual, expected)
class VersionsSerializerTests(test.NoDBTestCase):
def test_versions_list_xml_serializer(self):
versions_data = {
'versions': [
{
"id": "2.7",
"updated": "2011-07-18T11:30:00Z",
"status": "DEPRECATED",
"links": [
{
"rel": "self",
"href": "http://test/v2",
},
],
},
]
}
serializer = versions.VersionsTemplate()
response = serializer.serialize(versions_data)
root = etree.XML(response)
xmlutil.validate_schema(root, 'versions')
self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
version_elems = root.xpath('ns:version', namespaces=NS)
self.assertEqual(len(version_elems), 1)
version = version_elems[0]
self.assertEqual(version.get('id'), versions_data['versions'][0]['id'])
self.assertEqual(version.get('status'),
versions_data['versions'][0]['status'])
(link,) = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(link, [{
'rel': 'self',
'href': 'http://test/v2',
'type': 'application/atom+xml'}]))
def test_versions_multi_xml_serializer(self):
versions_data = {
'choices': [
{
"id": "2.7",
"updated": "2011-07-18T11:30:00Z",
"status": "DEPRECATED",
"media-types": EXP_VERSIONS['v2.0']['media-types'],
"links": [
{
"rel": "self",
"href": "http://test/v2/images",
},
],
},
]
}
serializer = versions.ChoicesTemplate()
response = serializer.serialize(versions_data)
root = etree.XML(response)
self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
(version,) = root.xpath('ns:version', namespaces=NS)
self.assertEqual(version.get('id'), versions_data['choices'][0]['id'])
self.assertEqual(version.get('status'),
versions_data['choices'][0]['status'])
media_types = list(version)[0]
self.assertEqual(media_types.tag.split('}')[1], "media-types")
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
self.assertTrue(common.compare_media_types(media_types,
versions_data['choices'][0]['media-types']))
(link,) = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(link,
versions_data['choices'][0]['links']))
def test_versions_list_atom_serializer(self):
versions_data = {
'versions': [
{
"id": "2.9.8",
"updated": "2011-07-20T11:40:00Z",
"status": "CURRENT",
"links": [
{
"rel": "self",
"href": "http://test/2.9.8",
},
],
},
]
}
serializer = versions.VersionsAtomSerializer()
response = serializer.serialize(versions_data)
f = feedparser.parse(response)
self.assertEqual(f.feed.title, 'Available API Versions')
self.assertEqual(f.feed.updated, '2011-07-20T11:40:00Z')
self.assertEqual(f.feed.id, 'http://test/')
self.assertEqual(f.feed.author, 'Rackspace')
self.assertEqual(f.feed.author_detail.href,
'http://www.rackspace.com/')
self.assertEqual(f.feed.links[0]['href'], 'http://test/')
self.assertEqual(f.feed.links[0]['rel'], 'self')
self.assertEqual(len(f.entries), 1)
entry = f.entries[0]
self.assertEqual(entry.id, 'http://test/2.9.8')
self.assertEqual(entry.title, 'Version 2.9.8')
self.assertEqual(entry.updated, '2011-07-20T11:40:00Z')
self.assertEqual(len(entry.content), 1)
self.assertEqual(entry.content[0].value,
'Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)')
self.assertEqual(len(entry.links), 1)
self.assertEqual(entry.links[0]['href'], 'http://test/2.9.8')
self.assertEqual(entry.links[0]['rel'], 'self')
def test_version_detail_atom_serializer(self):
versions_data = {
"version": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml"
";version=2",
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2",
}
],
},
}
serializer = versions.VersionAtomSerializer()
response = serializer.serialize(versions_data)
f = feedparser.parse(response)
self.assertEqual(f.feed.title, 'About This Version')
self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
self.assertEqual(f.feed.id, 'http://localhost/v2/')
self.assertEqual(f.feed.author, 'Rackspace')
self.assertEqual(f.feed.author_detail.href,
'http://www.rackspace.com/')
self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(f.feed.links[0]['rel'], 'self')
self.assertEqual(len(f.entries), 1)
entry = f.entries[0]
self.assertEqual(entry.id, 'http://localhost/v2/')
self.assertEqual(entry.title, 'Version v2.0')
self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
self.assertEqual(len(entry.content), 1)
self.assertEqual(entry.content[0].value,
'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
self.assertEqual(len(entry.links), 2)
self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(entry.links[0]['rel'], 'self')
self.assertEqual(entry.links[1], {
'rel': 'describedby',
'type': 'text/html',
'href': EXP_LINKS['v2.0']['html']})
def test_multi_choice_image_with_body(self):
req = webob.Request.blank('/images/1')
req.accept = "application/json"
req.method = 'POST'
req.content_type = "application/json"
req.body = "{\"foo\": \"bar\"}"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(300, res.status_int)
self.assertEqual("application/json", res.content_type)
def test_get_version_list_with_body(self):
req = webob.Request.blank('/')
req.accept = "application/json"
req.method = 'POST'
req.content_type = "application/json"
req.body = "{\"foo\": \"bar\"}"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
| apache-2.0 |
srivassumit/servo | etc/ci/performance/gecko_driver.py | 42 | 3804 | #!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from contextlib import contextmanager
import json
import os
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
import sys
@contextmanager
def create_gecko_session():
try:
firefox_binary = os.environ['FIREFOX_BIN']
except KeyError:
print("+=============================================================+")
print("| You must set the path to your firefox binary to FIREFOX_BIN |")
print("+=============================================================+")
sys.exit()
driver = webdriver.Firefox(firefox_binary=firefox_binary)
yield driver
    # driver.quit() used to give a "'NoneType' object has no attribute
    # 'path'" error; fixed upstream in
    # https://github.com/SeleniumHQ/selenium/commit/9157c7071f9900c2608f5ca40ae4f518ed373b96
driver.quit()
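def _example_session_usage():
    # Illustrative sketch only (not part of the upstream module): shows how
    # create_gecko_session() is meant to be consumed. The URL below is a
    # placeholder, and FIREFOX_BIN must already be set in the environment.
    with create_gecko_session() as driver:
        driver.get("http://example.org/")
        return driver.title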
def generate_placeholder(testcase):
# We need to still include the failed tests, otherwise Treeherder will
# consider the result to be a new test series, and thus a new graph. So we
# use a placeholder with values = -1 to make Treeherder happy, and still be
# able to identify failed tests (successful tests have time >=0).
timings = {
"testcase": testcase,
"title": ""
}
timing_names = [
"navigationStart",
"unloadEventStart",
"domLoading",
"fetchStart",
"responseStart",
"loadEventEnd",
"connectStart",
"domainLookupStart",
"redirectStart",
"domContentLoadedEventEnd",
"requestStart",
"secureConnectionStart",
"connectEnd",
"loadEventStart",
"domInteractive",
"domContentLoadedEventStart",
"redirectEnd",
"domainLookupEnd",
"unloadEventEnd",
"responseEnd",
"domComplete",
]
for name in timing_names:
timings[name] = 0 if name == "navigationStart" else -1
return [timings]
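def _example_placeholder_shape():
    # Illustrative sketch only: demonstrates the invariant documented above --
    # a failed run reports -1 for every timing except navigationStart (0).
    timings = generate_placeholder("http://example.org/")[0]
    assert timings["navigationStart"] == 0
    assert timings["loadEventEnd"] == -1
    return timings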
def run_gecko_test(testcase, timeout, is_async):
with create_gecko_session() as driver:
driver.set_page_load_timeout(timeout)
try:
driver.get(testcase)
except TimeoutException:
print("Timeout!")
return generate_placeholder(testcase)
try:
timings = {
"testcase": testcase,
"title": driver.title.replace(",", ",")
}
timings.update(json.loads(
driver.execute_script(
"return JSON.stringify(performance.timing)"
)
))
except:
# We need to return a timing object no matter what happened.
# See the comment in generate_placeholder() for explanation
print("Failed to get a valid timing measurement.")
return generate_placeholder(testcase)
if is_async:
# TODO: the timeout is hardcoded
driver.implicitly_wait(5) # sec
driver.find_element_by_id("GECKO_TEST_DONE")
timings.update(json.loads(
driver.execute_script(
"return JSON.stringify(window.customTimers)"
)
))
return [timings]
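def _example_async_invocation():
    # Illustrative sketch only: an async page is expected to expose a
    # GECKO_TEST_DONE element and populate window.customTimers, per the
    # branch above. The URL and timeout are placeholders.
    return run_gecko_test("http://example.org/async.html", 30, True)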
if __name__ == '__main__':
# Just for manual testing
from pprint import pprint
url = "http://localhost:8000/page_load_test/tp5n/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html"
    pprint(run_gecko_test(url, 15, False))
| mpl-2.0 |
odootr/odoo | addons/payment_paypal/models/res_company.py | 422 | 1752 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class ResCompany(osv.Model):
_inherit = "res.company"
def _get_paypal_account(self, cr, uid, ids, name, arg, context=None):
Acquirer = self.pool['payment.acquirer']
company_id = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.id
paypal_ids = Acquirer.search(cr, uid, [
('website_published', '=', True),
('name', 'ilike', 'paypal'),
('company_id', '=', company_id),
], limit=1, context=context)
if paypal_ids:
paypal = Acquirer.browse(cr, uid, paypal_ids[0], context=context)
return dict.fromkeys(ids, paypal.paypal_email_account)
return dict.fromkeys(ids, False)
def _set_paypal_account(self, cr, uid, id, name, value, arg, context=None):
Acquirer = self.pool['payment.acquirer']
company_id = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.id
paypal_account = self.browse(cr, uid, id, context=context).paypal_account
paypal_ids = Acquirer.search(cr, uid, [
('website_published', '=', True),
('paypal_email_account', '=', paypal_account),
('company_id', '=', company_id),
], context=context)
if paypal_ids:
Acquirer.write(cr, uid, paypal_ids, {'paypal_email_account': value}, context=context)
return True
_columns = {
'paypal_account': fields.function(
_get_paypal_account,
fnct_inv=_set_paypal_account,
nodrop=True,
type='char', string='Paypal Account',
help="Paypal username (usually email) for receiving online payments."
),
}
| agpl-3.0 |
renanrodm/namebench | nb_third_party/dns/rdtypes/IN/NAPTR.py | 248 | 4889 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.name
import dns.rdata
def _write_string(file, s):
l = len(s)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(s)
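def _example_write_string():
    # Illustrative sketch only (not part of the upstream module): DNS
    # character-strings are a one-byte length prefix followed by the raw
    # bytes, which is exactly what _write_string() emits.
    import StringIO
    buf = StringIO.StringIO()
    _write_string(buf, 'sip+E2U')
    return buf.getvalue()  # -> '\x07sip+E2U'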
class NAPTR(dns.rdata.Rdata):
"""NAPTR record
@ivar order: order
@type order: int
@ivar preference: preference
@type preference: int
@ivar flags: flags
@type flags: string
@ivar service: service
@type service: string
@ivar regexp: regular expression
@type regexp: string
@ivar replacement: replacement name
@type replacement: dns.name.Name object
@see: RFC 3403"""
__slots__ = ['order', 'preference', 'flags', 'service', 'regexp',
'replacement']
def __init__(self, rdclass, rdtype, order, preference, flags, service,
regexp, replacement):
super(NAPTR, self).__init__(rdclass, rdtype)
self.order = order
self.preference = preference
self.flags = flags
self.service = service
self.regexp = regexp
self.replacement = replacement
def to_text(self, origin=None, relativize=True, **kw):
replacement = self.replacement.choose_relativity(origin, relativize)
return '%d %d "%s" "%s" "%s" %s' % \
(self.order, self.preference,
dns.rdata._escapify(self.flags),
dns.rdata._escapify(self.service),
dns.rdata._escapify(self.regexp),
self.replacement)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
order = tok.get_uint16()
preference = tok.get_uint16()
flags = tok.get_string()
service = tok.get_string()
regexp = tok.get_string()
replacement = tok.get_name()
replacement = replacement.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, order, preference, flags, service,
regexp, replacement)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
two_ints = struct.pack("!HH", self.order, self.preference)
file.write(two_ints)
_write_string(file, self.flags)
_write_string(file, self.service)
_write_string(file, self.regexp)
self.replacement.to_wire(file, compress, origin)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(order, preference) = struct.unpack('!HH', wire[current : current + 4])
current += 4
rdlen -= 4
strings = []
for i in xrange(3):
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen or rdlen < 0:
raise dns.exception.FormError
s = wire[current : current + l]
current += l
rdlen -= l
strings.append(s)
(replacement, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise dns.exception.FormError
if not origin is None:
replacement = replacement.relativize(origin)
return cls(rdclass, rdtype, order, preference, strings[0], strings[1],
strings[2], replacement)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.replacement = self.replacement.choose_relativity(origin,
relativize)
def _cmp(self, other):
sp = struct.pack("!HH", self.order, self.preference)
op = struct.pack("!HH", other.order, other.preference)
v = cmp(sp, op)
if v == 0:
v = cmp(self.flags, other.flags)
if v == 0:
v = cmp(self.service, other.service)
if v == 0:
v = cmp(self.regexp, other.regexp)
if v == 0:
v = cmp(self.replacement, other.replacement)
return v
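def _example_naptr_to_text():
    # Illustrative sketch only, assuming dnspython's dns.rdataclass and
    # dns.rdatatype constant modules are importable here: a NAPTR rdata
    # renders as 'order preference "flags" "service" "regexp" replacement'.
    import dns.rdataclass
    import dns.rdatatype
    rd = NAPTR(dns.rdataclass.IN, dns.rdatatype.NAPTR, 100, 10,
               'u', 'sip+E2U', '!^.*$!sip:info@example.com!',
               dns.name.from_text('.'))
    return rd.to_text()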
| apache-2.0 |
dustymabe/ansible-modules-core | utilities/logic/async_wrapper.py | 10 | 7027 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
try:
import json
except ImportError:
import simplejson as json
import shlex
import os
import subprocess
import sys
import traceback
import signal
import time
import syslog
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
def notice(msg):
syslog.syslog(syslog.LOG_NOTICE, msg)
def daemonize_self():
# daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError:
e = sys.exc_info()[1]
sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(int('022', 8))
# do second fork
try:
pid = os.fork()
if pid > 0:
# print "Daemon PID %d" % pid
sys.exit(0)
except OSError:
e = sys.exc_info()[1]
sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
    dev_null = file('/dev/null', 'r+')  # open read/write so the dup2'd stdio fds accept writes
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
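def _example_daemonize_usage():
    # Illustrative sketch only (not part of the upstream module): callers
    # daemonize first and only then start long-running work, because stdio
    # is redirected to /dev/null once the double fork completes.
    daemonize_self()
    notice("daemonized as pid %d" % os.getpid())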
def _run_module(wrapped_cmd, jid, job_path):
tmp_job_path = job_path + ".tmp"
jobfile = open(tmp_job_path, "w")
jobfile.write(json.dumps({ "started" : 1, "finished" : 0, "ansible_job_id" : jid }))
jobfile.close()
os.rename(tmp_job_path, job_path)
jobfile = open(tmp_job_path, "w")
result = {}
outdata = ''
try:
cmd = shlex.split(wrapped_cmd)
script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(outdata, stderr) = script.communicate()
result = json.loads(outdata)
if stderr:
result['stderr'] = stderr
jobfile.write(json.dumps(result))
except (OSError, IOError):
e = sys.exc_info()[1]
result = {
"failed": 1,
"cmd" : wrapped_cmd,
"msg": str(e),
}
result['ansible_job_id'] = jid
jobfile.write(json.dumps(result))
except:
result = {
"failed" : 1,
"cmd" : wrapped_cmd,
"data" : outdata, # temporary notice only
"msg" : traceback.format_exc()
}
result['ansible_job_id'] = jid
jobfile.write(json.dumps(result))
jobfile.close()
os.rename(tmp_job_path, job_path)
####################
## main ##
####################
if __name__ == '__main__':
if len(sys.argv) < 3:
print(json.dumps({
"failed" : True,
"msg" : "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile>. Humans, do not call directly!"
}))
sys.exit(1)
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
if len(sys.argv) >= 5:
argsfile = sys.argv[4]
cmd = "%s %s" % (wrapped_module, argsfile)
else:
cmd = wrapped_module
step = 5
# setup job output directory
jobdir = os.path.expanduser("~/.ansible_async")
job_path = os.path.join(jobdir, jid)
if not os.path.exists(jobdir):
try:
os.makedirs(jobdir)
except:
print(json.dumps({
"failed" : 1,
"msg" : "could not create: %s" % jobdir
}))
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process
try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started
            # we need to not return immediately such that the launched command has an attempt
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
notice("Return async_wrapper task started.")
print(json.dumps({ "started" : 1, "finished" : 0, "ansible_job_id" : jid, "results_file" : job_path }))
sys.stdout.flush()
time.sleep(1)
sys.exit(0)
else:
# The actual wrapper process
# Daemonize, so we keep on running
daemonize_self()
# we are now daemonized, create a supervisory process
notice("Starting module and watcher")
sub_pid = os.fork()
if sub_pid:
# the parent stops the process after the time limit
remaining = int(time_limit)
# set the child process group id to kill all children
os.setpgid(sub_pid, sub_pid)
notice("Start watching %s (%s)"%(sub_pid, remaining))
time.sleep(step)
while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
notice("%s still running (%s)"%(sub_pid, remaining))
time.sleep(step)
remaining = remaining - step
if remaining <= 0:
notice("Now killing %s"%(sub_pid))
os.killpg(sub_pid, signal.SIGKILL)
notice("Sent kill to group %s"%sub_pid)
time.sleep(1)
sys.exit(0)
notice("Done in kid B.")
sys.exit(0)
else:
# the child process runs the actual module
notice("Start module (%s)"%os.getpid())
_run_module(cmd, jid, job_path)
notice("Module complete (%s)"%os.getpid())
sys.exit(0)
except SystemExit:
# On python2.4, SystemExit is a subclass of Exception.
# This block makes python2.4 behave the same as python2.5+
raise
except Exception:
e = sys.exc_info()[1]
notice("error: %s"%(e))
print(json.dumps({
"failed" : True,
"msg" : "FATAL ERROR: %s" % str(e)
}))
sys.exit(1)
| gpl-3.0 |
vikatory/kbengine | kbe/src/lib/python/Lib/test/test_tcl.py | 68 | 24957 | import unittest
import sys
import os
from test import support
# Skip this test if the _tkinter module wasn't built.
_tkinter = support.import_module('_tkinter')
# Make sure tkinter._fix runs to set up the environment
tkinter = support.import_fresh_module('tkinter')
from tkinter import Tcl
from _tkinter import TclError
try:
from _testcapi import INT_MAX, PY_SSIZE_T_MAX
except ImportError:
INT_MAX = PY_SSIZE_T_MAX = sys.maxsize
tcl_version = _tkinter.TCL_VERSION.split('.')
try:
for i in range(len(tcl_version)):
tcl_version[i] = int(tcl_version[i])
except ValueError:
pass
tcl_version = tuple(tcl_version)
_tk_patchlevel = None
def get_tk_patchlevel():
global _tk_patchlevel
if _tk_patchlevel is None:
tcl = Tcl()
patchlevel = []
for x in tcl.call('info', 'patchlevel').split('.'):
try:
x = int(x, 10)
except ValueError:
x = -1
patchlevel.append(x)
_tk_patchlevel = tuple(patchlevel)
return _tk_patchlevel
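def _example_patchlevel_gate():
    # Illustrative sketch only: patch levels are plain tuples, so the tests
    # below can gate behaviour with ordinary element-wise tuple comparison.
    return get_tk_patchlevel() >= (8, 5, 5)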
class TkinterTest(unittest.TestCase):
def testFlattenLen(self):
# flatten(<object with no length>)
self.assertRaises(TypeError, _tkinter._flatten, True)
class TclTest(unittest.TestCase):
def setUp(self):
self.interp = Tcl()
self.wantobjects = self.interp.tk.wantobjects()
def testEval(self):
tcl = self.interp
tcl.eval('set a 1')
self.assertEqual(tcl.eval('set a'),'1')
def test_eval_null_in_result(self):
tcl = self.interp
self.assertEqual(tcl.eval('set a "a\\0b"'), 'a\x00b')
def testEvalException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'set a')
def testEvalException2(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'this is wrong')
def testCall(self):
tcl = self.interp
tcl.call('set','a','1')
self.assertEqual(tcl.call('set','a'),'1')
def testCallException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.call,'set','a')
def testCallException2(self):
tcl = self.interp
self.assertRaises(TclError,tcl.call,'this','is','wrong')
def testSetVar(self):
tcl = self.interp
tcl.setvar('a','1')
self.assertEqual(tcl.eval('set a'),'1')
def testSetVarArray(self):
tcl = self.interp
tcl.setvar('a(1)','1')
self.assertEqual(tcl.eval('set a(1)'),'1')
def testGetVar(self):
tcl = self.interp
tcl.eval('set a 1')
self.assertEqual(tcl.getvar('a'),'1')
def testGetVarArray(self):
tcl = self.interp
tcl.eval('set a(1) 1')
self.assertEqual(tcl.getvar('a(1)'),'1')
def testGetVarException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.getvar,'a')
def testGetVarArrayException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.getvar,'a(1)')
def testUnsetVar(self):
tcl = self.interp
tcl.setvar('a',1)
self.assertEqual(tcl.eval('info exists a'),'1')
tcl.unsetvar('a')
self.assertEqual(tcl.eval('info exists a'),'0')
def testUnsetVarArray(self):
tcl = self.interp
tcl.setvar('a(1)',1)
tcl.setvar('a(2)',2)
self.assertEqual(tcl.eval('info exists a(1)'),'1')
self.assertEqual(tcl.eval('info exists a(2)'),'1')
tcl.unsetvar('a(1)')
self.assertEqual(tcl.eval('info exists a(1)'),'0')
self.assertEqual(tcl.eval('info exists a(2)'),'1')
def testUnsetVarException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.unsetvar,'a')
def test_getint(self):
tcl = self.interp.tk
self.assertEqual(tcl.getint(' 42 '), 42)
self.assertEqual(tcl.getint(42), 42)
self.assertRaises(TypeError, tcl.getint)
self.assertRaises(TypeError, tcl.getint, '42', '10')
self.assertRaises(TypeError, tcl.getint, b'42')
self.assertRaises(TypeError, tcl.getint, 42.0)
self.assertRaises(TclError, tcl.getint, 'a')
self.assertRaises((TypeError, ValueError, TclError),
tcl.getint, '42\0')
self.assertRaises((UnicodeEncodeError, ValueError, TclError),
tcl.getint, '42\ud800')
def test_getdouble(self):
tcl = self.interp.tk
self.assertEqual(tcl.getdouble(' 42 '), 42.0)
self.assertEqual(tcl.getdouble(' 42.5 '), 42.5)
self.assertEqual(tcl.getdouble(42.5), 42.5)
self.assertRaises(TypeError, tcl.getdouble)
self.assertRaises(TypeError, tcl.getdouble, '42.5', '10')
self.assertRaises(TypeError, tcl.getdouble, b'42.5')
self.assertRaises(TypeError, tcl.getdouble, 42)
self.assertRaises(TclError, tcl.getdouble, 'a')
self.assertRaises((TypeError, ValueError, TclError),
tcl.getdouble, '42.5\0')
self.assertRaises((UnicodeEncodeError, ValueError, TclError),
tcl.getdouble, '42.5\ud800')
def test_getboolean(self):
tcl = self.interp.tk
self.assertIs(tcl.getboolean('on'), True)
self.assertIs(tcl.getboolean('1'), True)
self.assertEqual(tcl.getboolean(42), 42)
self.assertRaises(TypeError, tcl.getboolean)
self.assertRaises(TypeError, tcl.getboolean, 'on', '1')
self.assertRaises(TypeError, tcl.getboolean, b'on')
self.assertRaises(TypeError, tcl.getboolean, 1.0)
self.assertRaises(TclError, tcl.getboolean, 'a')
self.assertRaises((TypeError, ValueError, TclError),
tcl.getboolean, 'on\0')
self.assertRaises((UnicodeEncodeError, ValueError, TclError),
tcl.getboolean, 'on\ud800')
def testEvalFile(self):
tcl = self.interp
with open(support.TESTFN, 'w') as f:
self.addCleanup(support.unlink, support.TESTFN)
f.write("""set a 1
set b 2
set c [ expr $a + $b ]
""")
tcl.evalfile(support.TESTFN)
self.assertEqual(tcl.eval('set a'),'1')
self.assertEqual(tcl.eval('set b'),'2')
self.assertEqual(tcl.eval('set c'),'3')
def test_evalfile_null_in_result(self):
tcl = self.interp
with open(support.TESTFN, 'w') as f:
self.addCleanup(support.unlink, support.TESTFN)
f.write("""
set a "a\0b"
set b "a\\0b"
""")
tcl.evalfile(support.TESTFN)
self.assertEqual(tcl.eval('set a'), 'a\x00b')
self.assertEqual(tcl.eval('set b'), 'a\x00b')
def testEvalFileException(self):
tcl = self.interp
filename = "doesnotexists"
try:
os.remove(filename)
except Exception as e:
pass
self.assertRaises(TclError,tcl.evalfile,filename)
def testPackageRequireException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'package require DNE')
@unittest.skipUnless(sys.platform == 'win32', 'Requires Windows')
def testLoadWithUNC(self):
# Build a UNC path from the regular path.
# Something like
# \\%COMPUTERNAME%\c$\python27\python.exe
fullname = os.path.abspath(sys.executable)
if fullname[1] != ':':
raise unittest.SkipTest('Absolute path should have drive part')
unc_name = r'\\%s\%s$\%s' % (os.environ['COMPUTERNAME'],
fullname[0],
fullname[3:])
if not os.path.exists(unc_name):
raise unittest.SkipTest('Cannot connect to UNC Path')
with support.EnvironmentVarGuard() as env:
env.unset("TCL_LIBRARY")
f = os.popen('%s -c "import tkinter; print(tkinter)"' % (unc_name,))
self.assertIn('tkinter', f.read())
# exit code must be zero
self.assertEqual(f.close(), None)
def test_exprstring(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprstring(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, str)
self.assertRaises(TypeError, tcl.exprstring)
self.assertRaises(TypeError, tcl.exprstring, '8.2', '+6')
self.assertRaises(TypeError, tcl.exprstring, b'8.2 + 6')
self.assertRaises(TclError, tcl.exprstring, 'spam')
check('', '0')
check('8.2 + 6', '14.2')
check('3.1 + $a', '6.1')
check('2 + "$a.$b"', '5.6')
check('4*[llength "6 2"]', '8')
check('{word one} < "word $a"', '0')
check('4*2 < 7', '0')
check('hypot($a, 4)', '5.0')
check('5 / 4', '1')
check('5 / 4.0', '1.25')
check('5 / ( [string length "abcd"] + 0.0 )', '1.25')
check('20.0/5.0', '4.0')
check('"0x03" > "2"', '1')
check('[string length "a\xbd\u20ac"]', '3')
check(r'[string length "a\xbd\u20ac"]', '3')
check('"abc"', 'abc')
check('"a\xbd\u20ac"', 'a\xbd\u20ac')
check(r'"a\xbd\u20ac"', 'a\xbd\u20ac')
check(r'"a\0b"', 'a\x00b')
if tcl_version >= (8, 5):
check('2**64', str(2**64))
def test_exprdouble(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprdouble(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, float)
self.assertRaises(TypeError, tcl.exprdouble)
self.assertRaises(TypeError, tcl.exprdouble, '8.2', '+6')
self.assertRaises(TypeError, tcl.exprdouble, b'8.2 + 6')
self.assertRaises(TclError, tcl.exprdouble, 'spam')
check('', 0.0)
check('8.2 + 6', 14.2)
check('3.1 + $a', 6.1)
check('2 + "$a.$b"', 5.6)
check('4*[llength "6 2"]', 8.0)
check('{word one} < "word $a"', 0.0)
check('4*2 < 7', 0.0)
check('hypot($a, 4)', 5.0)
check('5 / 4', 1.0)
check('5 / 4.0', 1.25)
check('5 / ( [string length "abcd"] + 0.0 )', 1.25)
check('20.0/5.0', 4.0)
check('"0x03" > "2"', 1.0)
check('[string length "a\xbd\u20ac"]', 3.0)
check(r'[string length "a\xbd\u20ac"]', 3.0)
self.assertRaises(TclError, tcl.exprdouble, '"abc"')
if tcl_version >= (8, 5):
check('2**64', float(2**64))
def test_exprlong(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprlong(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, int)
self.assertRaises(TypeError, tcl.exprlong)
self.assertRaises(TypeError, tcl.exprlong, '8.2', '+6')
self.assertRaises(TypeError, tcl.exprlong, b'8.2 + 6')
self.assertRaises(TclError, tcl.exprlong, 'spam')
check('', 0)
check('8.2 + 6', 14)
check('3.1 + $a', 6)
check('2 + "$a.$b"', 5)
check('4*[llength "6 2"]', 8)
check('{word one} < "word $a"', 0)
check('4*2 < 7', 0)
check('hypot($a, 4)', 5)
check('5 / 4', 1)
check('5 / 4.0', 1)
check('5 / ( [string length "abcd"] + 0.0 )', 1)
check('20.0/5.0', 4)
check('"0x03" > "2"', 1)
check('[string length "a\xbd\u20ac"]', 3)
check(r'[string length "a\xbd\u20ac"]', 3)
self.assertRaises(TclError, tcl.exprlong, '"abc"')
if tcl_version >= (8, 5):
self.assertRaises(TclError, tcl.exprlong, '2**64')
def test_exprboolean(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprboolean(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, int)
self.assertNotIsInstance(result, bool)
self.assertRaises(TypeError, tcl.exprboolean)
self.assertRaises(TypeError, tcl.exprboolean, '8.2', '+6')
self.assertRaises(TypeError, tcl.exprboolean, b'8.2 + 6')
self.assertRaises(TclError, tcl.exprboolean, 'spam')
check('', False)
for value in ('0', 'false', 'no', 'off'):
check(value, False)
check('"%s"' % value, False)
check('{%s}' % value, False)
for value in ('1', 'true', 'yes', 'on'):
check(value, True)
check('"%s"' % value, True)
check('{%s}' % value, True)
check('8.2 + 6', True)
check('3.1 + $a', True)
check('2 + "$a.$b"', True)
check('4*[llength "6 2"]', True)
check('{word one} < "word $a"', False)
check('4*2 < 7', False)
check('hypot($a, 4)', True)
check('5 / 4', True)
check('5 / 4.0', True)
check('5 / ( [string length "abcd"] + 0.0 )', True)
check('20.0/5.0', True)
check('"0x03" > "2"', True)
check('[string length "a\xbd\u20ac"]', True)
check(r'[string length "a\xbd\u20ac"]', True)
self.assertRaises(TclError, tcl.exprboolean, '"abc"')
if tcl_version >= (8, 5):
check('2**64', True)
def test_passing_values(self):
def passValue(value):
return self.interp.call('set', '_', value)
self.assertEqual(passValue(True), True if self.wantobjects else '1')
self.assertEqual(passValue(False), False if self.wantobjects else '0')
self.assertEqual(passValue('string'), 'string')
self.assertEqual(passValue('string\u20ac'), 'string\u20ac')
self.assertEqual(passValue('str\x00ing'), 'str\x00ing')
self.assertEqual(passValue('str\x00ing\xbd'), 'str\x00ing\xbd')
self.assertEqual(passValue('str\x00ing\u20ac'), 'str\x00ing\u20ac')
self.assertEqual(passValue(b'str\x00ing'),
b'str\x00ing' if self.wantobjects else 'str\x00ing')
self.assertEqual(passValue(b'str\xc0\x80ing'),
b'str\xc0\x80ing' if self.wantobjects else 'str\xc0\x80ing')
self.assertEqual(passValue(b'str\xbding'),
b'str\xbding' if self.wantobjects else 'str\xbding')
for i in (0, 1, -1, 2**31-1, -2**31):
self.assertEqual(passValue(i), i if self.wantobjects else str(i))
for f in (0.0, 1.0, -1.0, 1/3,
sys.float_info.min, sys.float_info.max,
-sys.float_info.min, -sys.float_info.max):
if self.wantobjects:
self.assertEqual(passValue(f), f)
else:
self.assertEqual(float(passValue(f)), f)
if self.wantobjects:
f = passValue(float('nan'))
self.assertNotEqual(f, f)
self.assertEqual(passValue(float('inf')), float('inf'))
self.assertEqual(passValue(-float('inf')), -float('inf'))
else:
self.assertEqual(float(passValue(float('inf'))), float('inf'))
self.assertEqual(float(passValue(-float('inf'))), -float('inf'))
# XXX NaN representation can be not parsable by float()
self.assertEqual(passValue((1, '2', (3.4,))),
(1, '2', (3.4,)) if self.wantobjects else '1 2 3.4')
def test_user_command(self):
result = None
def testfunc(arg):
nonlocal result
result = arg
return arg
self.interp.createcommand('testfunc', testfunc)
self.addCleanup(self.interp.tk.deletecommand, 'testfunc')
def check(value, expected=None, *, eq=self.assertEqual):
if expected is None:
expected = value
nonlocal result
result = None
r = self.interp.call('testfunc', value)
self.assertIsInstance(result, str)
eq(result, expected)
self.assertIsInstance(r, str)
eq(r, expected)
def float_eq(actual, expected):
self.assertAlmostEqual(float(actual), expected,
delta=abs(expected) * 1e-10)
check(True, '1')
check(False, '0')
check('string')
check('string\xbd')
check('string\u20ac')
check('')
check(b'string', 'string')
check(b'string\xe2\x82\xac', 'string\xe2\x82\xac')
check(b'string\xbd', 'string\xbd')
check(b'', '')
check('str\x00ing')
check('str\x00ing\xbd')
check('str\x00ing\u20ac')
check(b'str\x00ing', 'str\x00ing')
check(b'str\xc0\x80ing', 'str\xc0\x80ing')
check(b'str\xc0\x80ing\xe2\x82\xac', 'str\xc0\x80ing\xe2\x82\xac')
for i in (0, 1, -1, 2**31-1, -2**31):
check(i, str(i))
for f in (0.0, 1.0, -1.0):
check(f, repr(f))
for f in (1/3.0, sys.float_info.min, sys.float_info.max,
-sys.float_info.min, -sys.float_info.max):
check(f, eq=float_eq)
check(float('inf'), eq=float_eq)
check(-float('inf'), eq=float_eq)
# XXX NaN representation can be not parsable by float()
check((), '')
check((1, (2,), (3, 4), '5 6', ()), '1 2 {3 4} {5 6} {}')
def test_splitlist(self):
splitlist = self.interp.tk.splitlist
call = self.interp.tk.call
self.assertRaises(TypeError, splitlist)
self.assertRaises(TypeError, splitlist, 'a', 'b')
self.assertRaises(TypeError, splitlist, 2)
testcases = [
('2', ('2',)),
('', ()),
('{}', ('',)),
('""', ('',)),
('a\n b\t\r c\n ', ('a', 'b', 'c')),
(b'a\n b\t\r c\n ', ('a', 'b', 'c')),
('a \u20ac', ('a', '\u20ac')),
(b'a \xe2\x82\xac', ('a', '\u20ac')),
(b'a\xc0\x80b c\xc0\x80d', ('a\x00b', 'c\x00d')),
('a {b c}', ('a', 'b c')),
(r'a b\ c', ('a', 'b c')),
(('a', 'b c'), ('a', 'b c')),
('a 2', ('a', '2')),
(('a', 2), ('a', 2)),
('a 3.4', ('a', '3.4')),
(('a', 3.4), ('a', 3.4)),
((), ()),
(call('list', 1, '2', (3.4,)),
(1, '2', (3.4,)) if self.wantobjects else
('1', '2', '3.4')),
]
if tcl_version >= (8, 5):
if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5):
# Before 8.5.5 dicts were converted to lists through string
expected = ('12', '\u20ac', '\xe2\x82\xac', '3.4')
else:
expected = (12, '\u20ac', b'\xe2\x82\xac', (3.4,))
testcases += [
(call('dict', 'create', 12, '\u20ac', b'\xe2\x82\xac', (3.4,)),
expected),
]
for arg, res in testcases:
self.assertEqual(splitlist(arg), res, msg=arg)
self.assertRaises(TclError, splitlist, '{')
def test_split(self):
split = self.interp.tk.split
call = self.interp.tk.call
self.assertRaises(TypeError, split)
self.assertRaises(TypeError, split, 'a', 'b')
self.assertRaises(TypeError, split, 2)
testcases = [
('2', '2'),
('', ''),
('{}', ''),
('""', ''),
('{', '{'),
('a\n b\t\r c\n ', ('a', 'b', 'c')),
(b'a\n b\t\r c\n ', ('a', 'b', 'c')),
('a \u20ac', ('a', '\u20ac')),
(b'a \xe2\x82\xac', ('a', '\u20ac')),
(b'a\xc0\x80b', 'a\x00b'),
(b'a\xc0\x80b c\xc0\x80d', ('a\x00b', 'c\x00d')),
(b'{a\xc0\x80b c\xc0\x80d', '{a\x00b c\x00d'),
('a {b c}', ('a', ('b', 'c'))),
(r'a b\ c', ('a', ('b', 'c'))),
(('a', b'b c'), ('a', ('b', 'c'))),
(('a', 'b c'), ('a', ('b', 'c'))),
('a 2', ('a', '2')),
(('a', 2), ('a', 2)),
('a 3.4', ('a', '3.4')),
(('a', 3.4), ('a', 3.4)),
(('a', (2, 3.4)), ('a', (2, 3.4))),
((), ()),
(call('list', 1, '2', (3.4,)),
(1, '2', (3.4,)) if self.wantobjects else
('1', '2', '3.4')),
]
if tcl_version >= (8, 5):
if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5):
# Before 8.5.5 dicts were converted to lists through string
expected = ('12', '\u20ac', '\xe2\x82\xac', '3.4')
else:
expected = (12, '\u20ac', b'\xe2\x82\xac', (3.4,))
testcases += [
(call('dict', 'create', 12, '\u20ac', b'\xe2\x82\xac', (3.4,)),
expected),
]
for arg, res in testcases:
self.assertEqual(split(arg), res, msg=arg)
def test_splitdict(self):
splitdict = tkinter._splitdict
tcl = self.interp.tk
arg = '-a {1 2 3} -something foo status {}'
self.assertEqual(splitdict(tcl, arg, False),
{'-a': '1 2 3', '-something': 'foo', 'status': ''})
self.assertEqual(splitdict(tcl, arg),
{'a': '1 2 3', 'something': 'foo', 'status': ''})
arg = ('-a', (1, 2, 3), '-something', 'foo', 'status', '{}')
self.assertEqual(splitdict(tcl, arg, False),
{'-a': (1, 2, 3), '-something': 'foo', 'status': '{}'})
self.assertEqual(splitdict(tcl, arg),
{'a': (1, 2, 3), 'something': 'foo', 'status': '{}'})
self.assertRaises(RuntimeError, splitdict, tcl, '-a b -c ')
self.assertRaises(RuntimeError, splitdict, tcl, ('-a', 'b', '-c'))
arg = tcl.call('list',
'-a', (1, 2, 3), '-something', 'foo', 'status', ())
self.assertEqual(splitdict(tcl, arg),
{'a': (1, 2, 3) if self.wantobjects else '1 2 3',
'something': 'foo', 'status': ''})
if tcl_version >= (8, 5):
arg = tcl.call('dict', 'create',
'-a', (1, 2, 3), '-something', 'foo', 'status', ())
if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5):
# Before 8.5.5 dicts were converted to lists through string
expected = {'a': '1 2 3', 'something': 'foo', 'status': ''}
else:
expected = {'a': (1, 2, 3), 'something': 'foo', 'status': ''}
self.assertEqual(splitdict(tcl, arg), expected)
class BigmemTclTest(unittest.TestCase):
def setUp(self):
self.interp = Tcl()
@support.cpython_only
@unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
@support.bigmemtest(size=INT_MAX + 1, memuse=5, dry_run=False)
def test_huge_string_call(self, size):
value = ' ' * size
self.assertRaises(OverflowError, self.interp.call, 'set', '_', value)
@support.cpython_only
@unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
@support.bigmemtest(size=INT_MAX + 1, memuse=9, dry_run=False)
def test_huge_string_builtins(self, size):
value = '1' + ' ' * size
self.assertRaises(OverflowError, self.interp.tk.getint, value)
self.assertRaises(OverflowError, self.interp.tk.getdouble, value)
self.assertRaises(OverflowError, self.interp.tk.getboolean, value)
self.assertRaises(OverflowError, self.interp.eval, value)
self.assertRaises(OverflowError, self.interp.evalfile, value)
self.assertRaises(OverflowError, self.interp.record, value)
self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
self.assertRaises(OverflowError, self.interp.setvar, value, 'x', 'a')
self.assertRaises(OverflowError, self.interp.setvar, 'x', value, 'a')
self.assertRaises(OverflowError, self.interp.unsetvar, value)
self.assertRaises(OverflowError, self.interp.unsetvar, 'x', value)
self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
self.assertRaises(OverflowError, self.interp.exprstring, value)
self.assertRaises(OverflowError, self.interp.exprlong, value)
self.assertRaises(OverflowError, self.interp.exprboolean, value)
self.assertRaises(OverflowError, self.interp.splitlist, value)
self.assertRaises(OverflowError, self.interp.split, value)
self.assertRaises(OverflowError, self.interp.createcommand, value, max)
self.assertRaises(OverflowError, self.interp.deletecommand, value)
def setUpModule():
if support.verbose:
tcl = Tcl()
print('patchlevel =', tcl.call('info', 'patchlevel'))
def test_main():
support.run_unittest(TclTest, TkinterTest, BigmemTclTest)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
haad/ansible-modules-extras | network/asa/asa_command.py | 31 | 7099 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: asa_command
version_added: "2.2"
author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
short_description: Run arbitrary commands on Cisco ASA devices.
description:
- Sends arbitrary commands to an ASA node and returns the results
read from the device. The M(asa_command) module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: asa
options:
commands:
description:
- List of commands to send to the remote device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
        the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: cisco
password: cisco
authorize: yes
auth_pass: cisco
transport: cli
- asa_command:
commands:
- show version
provider: "{{ cli }}"
- asa_command:
commands:
- show asp drop
- show memory
provider: "{{ cli }}"
- asa_command:
commands:
- show version
provider: "{{ cli }}"
context: system
"""
RETURN = """
stdout:
description: the set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: the conditionals that failed
  returned: failed
type: list
sample: ['...', '...']
"""
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.netcli import AddCommandError, FailedConditionsError
from ansible.module_utils.asa import NetworkModule, NetworkError
VALID_KEYS = ['command', 'prompt', 'response']
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def parse_commands(module):
for cmd in module.params['commands']:
if isinstance(cmd, basestring):
cmd = dict(command=cmd, output=None)
elif 'command' not in cmd:
module.fail_json(msg='command keyword argument is required')
elif not set(cmd.keys()).issubset(VALID_KEYS):
module.fail_json(msg='unknown keyword specified')
yield cmd
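def _example_parse_commands():
    # Illustrative sketch only (not part of the upstream module): plain
    # strings are normalised into dicts, while dict commands are checked
    # against VALID_KEYS. The fake module below is a hypothetical stand-in.
    class _FakeModule(object):
        params = {'commands': ['show version', dict(command='show memory')]}
        def fail_json(self, **kwargs):
            raise SystemExit(kwargs)
    return list(parse_commands(_FakeModule()))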
def main():
spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
module = NetworkModule(argument_spec=spec,
connect_on_load=False,
supports_check_mode=True)
commands = list(parse_commands(module))
conditionals = module.params['wait_for'] or list()
warnings = list()
runner = CommandRunner(module)
for cmd in commands:
if module.check_mode and not cmd['command'].startswith('show'):
warnings.append('only show commands are supported when using '
'check mode, not executing `%s`' % cmd['command'])
else:
if cmd['command'].startswith('conf'):
module.fail_json(msg='asa_command does not support running '
'config mode commands. Please use '
'asa_config instead')
try:
runner.add_command(**cmd)
except AddCommandError:
exc = get_exception()
warnings.append('duplicate command detected: %s' % cmd)
for item in conditionals:
runner.add_conditional(item)
runner.retries = module.params['retries']
runner.interval = module.params['interval']
runner.match = module.params['match']
try:
runner.run()
except FailedConditionsError:
exc = get_exception()
module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc))
result = dict(changed=False, stdout=list())
for cmd in commands:
try:
output = runner.get_command(cmd['command'])
except ValueError:
output = 'command not executed due to check_mode, see warnings'
result['stdout'].append(output)
result['warnings'] = warnings
result['stdout_lines'] = list(to_lines(result['stdout']))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
fduraffourg/servo | tests/wpt/css-tests/tools/pywebsocket/src/example/close_wsh.py | 495 | 2835 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
from mod_pywebsocket import common
from mod_pywebsocket import stream
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line is None:
return
code, reason = line.split(' ', 1)
if code is None or reason is None:
return
request.ws_stream.close_connection(int(code), reason)
# close_connection() initiates closing handshake. It validates code
# and reason. If you want to send a broken close frame for a test,
# following code will be useful.
# > data = struct.pack('!H', int(code)) + reason.encode('UTF-8')
# > request.connection.write(stream.create_close_frame(data))
# > # Suppress to re-respond client responding close frame.
# > raise Exception("customized server initiated closing handshake")
def web_socket_passive_closing_handshake(request):
# Simply echo a close status code
code, reason = request.ws_close_code, request.ws_close_reason
# pywebsocket sets pseudo code for receiving an empty body close frame.
if code == common.STATUS_NO_STATUS_RECEIVED:
code = None
reason = ''
return code, reason
# vi:sts=4 sw=4 et
| mpl-2.0 |
virtualnobi/MediaFiler | Model/MediaOrganization/testOrganizationByDate.py | 1 | 3086 | #!python
# -*- coding: latin-1 -*-
"""
(c) by nobisoft 2016-
"""
# Imports
## Standard
from __future__ import print_function
import unittest
import StringIO
## Contributed
## nobi
## Project
#import Model.Installer # to resolve import sequence issues
from OrganizationByDate import OrganizationByDate
class TestOrganizationByDate(unittest.TestCase):
"""
"""
# Constants
# Class Variables
# Class Methods
# Lifecycle
# Setters
# Getters
# Event Handlers
# Inheritance - Superclass
# Other API Functions
def testDeriveDateFromPath(self):
# self.assertTrue(True, 'message')
# self.assertFalse(False, 'message')
# with self.assertRaises(ValueError):
# PartialDateTime('non-date string')
self.verifyInvalidDate('/test/548183.rest')
self.checkDeriveDateFromPath('/test/2000-04-01.rest', '2000', '04', '01', '.rest')
self.checkDeriveDateFromPath('/test/00-04-01.rest', '2000', '04', '01', '.rest')
# self.checkDeriveDateFromPath('/test/01.04.2000.rest', '2000', '04', '01')
# self.checkDeriveDateFromPath('/test/01.04.00.rest', '2000', '04', '01')
self.checkDeriveDateFromPath('/test/nobi.2005.rest', '2005', None, None, '.rest')
self.checkDeriveDateFromPath('\\test\\1980-03.Algerien\\234.jpg', '1980', '03', None, '.Algerien\\234.jpg')
self.checkDeriveDateFromPath('\\test\\2015-02.Schwellbrunn\\IMG_1980.jpg', '2015', '02', None, '.Schwellbrunn\\IMG_1980.jpg')
self.checkDeriveDateFromPath('/test/IMG_1957.JPG', '1957', None, None, '.JPG')
self.checkDeriveDateFromPath('/test/2005.nobi.rest', '2005', None, None, '.nobi.rest')
self.checkDeriveDateFromPath('/test/2008-nobi-Holger.png', '2008', None, None, '-nobi-Holger.png')
self.checkDeriveDateFromPath('\\test\\0000\\0000-001.rest', '0000', None, None, '-001.rest')
# self.checkDeriveDateFromPath('/test/20150930-_MG_2425.rest', '2015', '09', '30')
# self.checkDeriveDateFromPath('/test/IMG_20150809_175625.jpg', '2015', '08', '09')
# self.checkDeriveDateFromPath('\\test\\2015-02.Schwellbrunn\\IMG_20150219_175347.jpg', '2015', '02', '19')
# Internal - to change without notice
def verifyInvalidDate(self, path):
"""
"""
self.checkDeriveDateFromPath(path, OrganizationByDate.UnknownDateName, None, None, None)
def checkDeriveDateFromPath(self, path, targetYear, targetMonth, targetDay, targetRest=None):
"""
"""
log = StringIO.StringIO()
(year, month, day, rest) = OrganizationByDate.deriveDateFromPath(log, path)
self.assertEqual((year, month, day),
(targetYear, targetMonth, targetDay),
('Failure at "%s": %s-%s-%s with rest "%s"' % (path, year, month, day, rest)))
        if (targetRest is not None):
self.assertEqual(targetRest, rest, ('Failure at "%s": Rest "%s" unequal to "%s"' % (path, rest, targetRest)))
# Class Initialization
pass
# Executable Script
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
xpansa/stock-logistics-tracking | stock_packaging_usability_ul/wizard/__init__.py | 1 | 1076 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Stock Packaging Usability UL module for Odoo
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import stock_select_ul
| agpl-3.0 |
mapleoin/cuZmeura | ads/migrations/0006_added_articles.py | 3 | 8975 | # -*- coding: utf-8 -*-
from south.db import db
from django.db import models
from cuZmeura.ads.models import *
class Migration:
def forwards(self, orm):
# Adding model 'Article'
db.create_table('ads_article', (
('id', orm['ads.article:id']),
('title', orm['ads.article:title']),
('slug', orm['ads.article:slug']),
('created_at', orm['ads.article:created_at']),
('published', orm['ads.article:published']),
('body', orm['ads.article:body']),
))
db.send_create_signal('ads', ['Article'])
# Changing field 'Impression.referer_netloc'
# (to signature: django.db.models.fields.URLField(max_length=400, null=True))
db.alter_column('ads_impression', 'referer_netloc', orm['ads.impression:referer_netloc'])
# Changing field 'Impression.referer'
# (to signature: django.db.models.fields.URLField(max_length=400, null=True))
db.alter_column('ads_impression', 'referer', orm['ads.impression:referer'])
# Changing field 'Publisher.slug'
# (to signature: django.db.models.fields.SlugField(unique=True, max_length=15, db_index=True))
db.alter_column('ads_publisher', 'slug', orm['ads.publisher:slug'])
# Creating unique_together for [url] on Publisher.
db.create_unique('ads_publisher', ['url'])
def backwards(self, orm):
# Deleting unique_together for [url] on Publisher.
db.delete_unique('ads_publisher', ['url'])
# Deleting model 'Article'
db.delete_table('ads_article')
# Changing field 'Impression.referer_netloc'
# (to signature: django.db.models.fields.URLField(max_length=200, null=True))
db.alter_column('ads_impression', 'referer_netloc', orm['ads.impression:referer_netloc'])
# Changing field 'Impression.referer'
# (to signature: django.db.models.fields.URLField(max_length=200, null=True))
db.alter_column('ads_impression', 'referer', orm['ads.impression:referer'])
# Changing field 'Publisher.slug'
# (to signature: django.db.models.fields.SlugField(max_length=10, unique=True, db_index=True))
db.alter_column('ads_publisher', 'slug', orm['ads.publisher:slug'])
models = {
'ads.ad': {
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ads.Product']"}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ads.AdSize']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'ads.adsize': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'size': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'})
},
'ads.article': {
'body': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
},
'ads.impression': {
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ads.Ad']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'publisher': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'referer': ('django.db.models.fields.URLField', [], {'max_length': '400', 'null': 'True'}),
'referer_netloc': ('django.db.models.fields.URLField', [], {'max_length': '400', 'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'ads.product': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'ads.publisher': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '15', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
},
'ads.useractivation': {
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key_expires': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['ads']
| agpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/reportlab-3.2.0/demos/gadflypaper/gfe.py | 15 | 32480 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__doc__=''
__version__=''' $Id$ '''
#REPORTLAB_TEST_SCRIPT
import sys
from reportlab.platypus import *
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
PAGE_HEIGHT=defaultPageSize[1]
styles = getSampleStyleSheet()
Title = "Integrating Diverse Data Sources with Gadfly 2"
Author = "Aaron Watters"
URL = "http://www.chordate.com/"
email = "[email protected]"
Abstract = """This paper describes the primative methods underlying the implementation
of SQL query evaluation in Gadfly 2, a database management system implemented
in Python [Van Rossum]. The major design goals behind
the architecture described here are to simplify the implementation
and to permit flexible and efficient extensions to the gadfly
engine. Using this architecture and its interfaces programmers
can add functionality to the engine such as alternative disk based
indexed table implementations, dynamic interfaces to remote data
bases or other data sources, and user defined computations."""
from reportlab.lib.units import inch
pageinfo = "%s / %s / %s" % (Author, email, Title)
def myFirstPage(canvas, doc):
canvas.saveState()
#canvas.setStrokeColorRGB(1,0,0)
#canvas.setLineWidth(5)
#canvas.line(66,72,66,PAGE_HEIGHT-72)
canvas.setFont('Times-Bold',16)
canvas.drawString(108, PAGE_HEIGHT-108, Title)
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "First Page / %s" % pageinfo)
canvas.restoreState()
def myLaterPages(canvas, doc):
#canvas.drawImage("snkanim.gif", 36, 36)
canvas.saveState()
#canvas.setStrokeColorRGB(1,0,0)
#canvas.setLineWidth(5)
#canvas.line(66,72,66,PAGE_HEIGHT-72)
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d %s" % (doc.page, pageinfo))
canvas.restoreState()
def go():
Elements.insert(0,Spacer(0,inch))
doc = SimpleDocTemplate('gfe.pdf')
doc.build(Elements,onFirstPage=myFirstPage, onLaterPages=myLaterPages)
Elements = []
HeaderStyle = styles["Heading1"] # XXXX
def header(txt, style=HeaderStyle, klass=Paragraph, sep=0.3):
s = Spacer(0.2*inch, sep*inch)
Elements.append(s)
para = klass(txt, style)
Elements.append(para)
ParaStyle = styles["Normal"]
def p(txt):
return header(txt, style=ParaStyle, sep=0.1)
#pre = p # XXX
PreStyle = styles["Code"]
def pre(txt):
s = Spacer(0.1*inch, 0.1*inch)
Elements.append(s)
p = Preformatted(txt, PreStyle)
Elements.append(p)
#header(Title, sep=0.1, style=ParaStyle)
header(Author, sep=0.1, style=ParaStyle)
header(URL, sep=0.1, style=ParaStyle)
header(email, sep=0.1, style=ParaStyle)
header("ABSTRACT")
p(Abstract)
header("Backgrounder")
p("""\
The term "database" usually refers to a persistent
collection of data. Data is persistent if it continues
to exist whether or not it is associated with a running
process on the computer, or even if the computer is
shut down and restarted at some future time. Database
management systems provide support for constructing databases,
maintaining databases, and extracting information from databases.""")
p("""\
Relational databases manipulate and store persistent
table structures called relations, such as the following
three tables""")
pre("""\
-- drinkers who frequent bars (this is a comment)
select * from frequents
DRINKER | PERWEEK | BAR
============================
adam | 1 | lolas
woody | 5 | cheers
sam | 5 | cheers
norm | 3 | cheers
wilt | 2 | joes
norm | 1 | joes
lola | 6 | lolas
norm | 2 | lolas
woody | 1 | lolas
pierre | 0 | frankies
""")
pre("""\
-- drinkers who like beers
select * from likes
DRINKER | PERDAY | BEER
===============================
adam | 2 | bud
wilt | 1 | rollingrock
sam | 2 | bud
norm | 3 | rollingrock
norm | 2 | bud
nan | 1 | sierranevada
woody | 2 | pabst
lola | 5 | mickies
""")
pre("""\
-- beers served from bars
select * from serves
BAR | QUANTITY | BEER
=================================
cheers | 500 | bud
cheers | 255 | samadams
joes | 217 | bud
joes | 13 | samadams
joes | 2222 | mickies
lolas | 1515 | mickies
lolas | 333 | pabst
winkos | 432 | rollingrock
frankies | 5 | snafu
""")
p("""
The relational model for database structures makes
the simplifying assumption that all data in a database
can be represented in simple table structures
such as these. Although this assumption seems extreme
it provides a good foundation for defining solid and
well defined database management systems and some
of the most successful software companies in the
world, such as Oracle, Sybase, IBM, and Microsoft,
have marketed database management systems based on
the relational model quite successfully.
""")
p("""
SQL stands for Structured Query Language.
The SQL language defines industry standard
mechanisms for creating, querying, and modifying
relational tables. Several years ago SQL was one
of many Relational Database Management System
(RDBMS) query languages in use, and many would
argue not the best one. Now, largely due
to standardization efforts and the
backing of IBM, SQL is THE standard way to talk
to database systems.
""")
p("""
There are many advantages SQL offers over other
database query languages and alternative paradigms
at this time (please see [O'Neill] or [Korth and Silberschatz]
for more extensive discussions and comparisons between the
SQL/relational approach and others.)
""")
p("""
The chief advantage over all contenders at this time
is that SQL and the relational model are now widely
used as interfaces and back end data stores to many
different products with different performance characteristics,
user interfaces, and other qualities: Oracle, Sybase,
Ingres, SQL Server, Access, Outlook,
Excel, IBM DB2, Paradox, MySQL, MSQL, POSTgres, and many
others. For this reason a program designed to use
an SQL database as its data storage mechanism can
easily be ported from one SQL data manager to another,
possibly on different platforms. In fact the same
program can seamlessly use several backends and/or
import/export data between different data base platforms
with trivial ease.
No other paradigm offers such flexibility at the moment.
""")
p("""
Another advantage which is not as immediately
obvious is that the relational model and the SQL
query language are easily understood by semi-technical
and non-technical professionals, such as business
people and accountants. Human resources managers
who would be terrified by an object model diagram
or a snippet of code that resembles a conventional
programming language will frequently feel quite at
ease with a relational model which resembles the
sort of tabular data they deal with on paper in
reports and forms on a daily basis. With a little training the
same HR managers may be able to translate the request
"Who are the drinkers who like bud and frequent cheers?"
into the SQL query
""")
pre("""
select drinker
from frequents
where bar='cheers'
and drinker in (
select drinker
from likes
where beer='bud')
""")
p("""
(or at least they have some hope of understanding
the query once it is written by a technical person
or generated by a GUI interface tool). Thus the use
of SQL and the relational model enables communication
between different communities which must understand
and interact with stored information. In contrast
many other approaches cannot be understood easily
by people without extensive programming experience.
""")
p("""
Furthermore the declarative nature of SQL
lends itself to automatic query optimization,
and engines such as Gadfly can automatically translate a user query
into an optimized query plan which takes
advantage of available indices and other data characteristics.
In contrast more navigational techniques require the application
program itself to optimize the accesses to the database and
explicitly make use of indices.
""")
# HACK
Elements.append(PageBreak())
p("""
While it must be admitted that there are application
domains such as computer aided engineering design where
the relational model is unnatural, it is also important
to recognize that for many application domains (such
as scheduling, accounting, inventory, finance, personal
information management, electronic mail) the relational
model is a very natural fit and the SQL query language
makes most accesses to the underlying data (even sophisticated
ones) straightforward. """)
p("""For an example of a moderately
sophisticated query using the tables given above,
the following query lists the drinkers who frequent lolas bar
and like at least two beers not served by lolas
""")
if 0:
go()
sys.exit(1)
pre("""
select f.drinker
from frequents f, likes l
where f.drinker=l.drinker and f.bar='lolas'
and l.beer not in
(select beer from serves where bar='lolas')
group by f.drinker
having count(distinct beer)>=2
""")
p("""
yielding the result
""")
pre("""
DRINKER
=======
norm
""")
p("""
Experience shows that queries of this sort are actually
quite common in many applications, and are often much more
difficult to formulate using some navigational database
organizations, such as some "object oriented" database
paradigms.
""")
p("""
Certainly,
SQL does not provide all you need to interact with
databases -- in order to do "real work" with SQL you
need to use SQL and at least one other language
(such as C, Pascal, C++, Perl, Python, TCL, Visual Basic
or others) to do work (such as readable formatting a report
from raw data) that SQL was not designed to do.
""")
header("Why Gadfly 1?")
p("""Gadfly 1.0 is an SQL based relational database implementation
implemented entirely in the Python programming language, with
optional fast data structure accellerators implemented in the
C programming language. Gadfly is relatively small, highly portable,
very easy to use (especially for programmers with previous experience
with SQL databases such as MS Access or Oracle), and reasonably
fast (especially when the kjbuckets C accelerators are used).
For moderate sized problems Gadfly offers a fairly complete
set of features such as transaction semantics, failure recovery,
and a TCP/IP based client/server mode (Please see [Gadfly] for
detailed discussion).""")
header("Why Gadfly 2?")
p("""Gadfly 1.0 also has significant limitations. An active Gadfly
1.0 database keeps all data in (virtual) memory, and hence a Gadfly
1.0 database is limited in size to available virtual memory. Important
features such as date/time/interval operations, regular expression
matching and other standard SQL features are not implemented in
Gadfly 1.0. The optimizer and the query evaluator perform optimizations
using properties of the equality predicate but do not optimize
using properties of inequalities such as BETWEEN or less-than.
It is possible to add "extension views" to a Gadfly
1.0 database, but the mechanism is somewhat clumsy and indices
over extension views are not well supported. The features of Gadfly
2.0 discussed here attempt to address these deficiencies by providing
a uniform extension model that permits addition of alternate table,
function, and predicate implementations.""")
p("""Other deficiencies, such as missing constructs like "ALTER
TABLE" and the lack of outer joins and NULL values are not
addressed here, although they may be addressed in Gadfly 2.0 or
a later release. This paper also does not intend to explain
the complete operations of the internals; it is intended to provide
at least enough information to understand the basic mechanisms
for extending gadfly.""")
p("""Some concepts and definitions provided next help with the description
of the gadfly interfaces. [Note: due to the terseness of this
format the ensuing is not a highly formal presentation, but attempts
to approach precision where precision is important.]""")
header("The semilattice of substitutions")
p("""Underlying the gadfly implementation are the basic concepts
associated with substitutions. A substitution is a mapping
of attribute names to values (implemented in gadfly using kjbuckets.kjDict
objects). Here an attribute refers to some sort of "descriptive
variable", such as NAME and a value is an assignment for that variable,
like "Dave Ascher". In Gadfly a table is implemented as a sequence
of substitutions, and substitutions are used in many other ways as well.
""")
p("""
For example consider the substitutions""")
pre("""
A = [DRINKER=>'sam']
B = [DRINKER=>'sam', BAR=>'cheers']
C = [DRINKER=>'woody', BEER=>'bud']
D = [DRINKER=>'sam', BEER=>'mickies']
E = [DRINKER=>'sam', BAR=>'cheers', BEER=>'mickies']
F = [DRINKER=>'sam', BEER=>'mickies']
G = [BEER=>'bud', BAR=>'lolas']
H = [] # the empty substitution
I = [BAR=>'cheers', CAPACITY=>300]""")
p("""A trivial but important observation is that since substitutions
are mappings, no attribute can assume more than one value in a
substitution. In the operations described below whenever an operator
"tries" to assign more than one value to an attribute
the operator yields an "overdefined" or "inconsistent"
result.""")
header("Information Semi-order:")
p("""Substitution B is said to be
more informative than A because B agrees with all assignments
in A (in addition to providing more information as well). Similarly
we say that E is more informative than A, B, D, F, and H but E
is not more informative than the others since, for example G disagrees
with E on the value assigned to the BEER attribute and I provides
additional CAPACITY information not provided in E.""")
header("Joins and Inconsistency:")
p("""A join of two substitutions
X and Y is the least informative substitution Z such that Z is
more informative (or equally informative) than both X and Y. For
example B is the join of B with A, E is the join of B with D and""")
pre("""
E join I =
[DRINKER=>'sam', BAR=>'cheers', BEER=>'mickies', CAPACITY=>300]""")
p("""For any two substitutions either (1) they disagree on the value
assigned to some attribute and have no join or (2) they agree
on all common attributes (if there are any) and their join is
the union of all (name, value) assignments in both substitutions.
Written in terms of kjbuckets.kjDict operations two kjDicts X and
Y have a join Z = (X+Y) if and only if Z.Clean() is not None.
Two substitutions that have no join are said to be inconsistent.
For example I and G are inconsistent since they disagree on
the value assigned to the BAR attribute and therefore have no
join. The algebra of substitutions with joins technically defines
an abstract algebraic structure called a semilattice.""")
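p("""As a concrete sketch (assuming the kjbuckets C extension is
installed, and using the substitutions B, D, and G from the examples
above) the join test just described can be exercised directly:""")
pre("""
from kjbuckets import kjDict
B = kjDict([("DRINKER", "sam"), ("BAR", "cheers")])
D = kjDict([("DRINKER", "sam"), ("BEER", "mickies")])
G = kjDict([("BEER", "bud"), ("BAR", "lolas")])
E = (B + D).Clean()    # defined: B and D agree on DRINKER
print E                # [DRINKER=>'sam', BAR=>'cheers', BEER=>'mickies']
print (E + G).Clean()  # None: E and G disagree on BEER (and on BAR)
""")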
header("Name space remapping")
p("""Another primitive operation over substitutions is the remap
operation S2 = S.remap(R) where S is a substitution and R is a
graph of attribute names and S2 is a substitution. This operation
is defined to produce the substitution S2 such that""")
pre("""
Name=>Value in S2 if and only if
Name1=>Value in S and Name<=Name1 in R
""")
p("""or if there is no such substitution S2 the remap value is said
to be overdefined.""")
p("""For example the remap operation may be used to eliminate attributes
from a substitution. For example""")
pre("""
E.remap([DRINKER<=DRINKER, BAR<=BAR])
= [DRINKER=>'sam', BAR=>'cheers']
""")
p("""Illustrating that remapping using the [DRINKER<=DRINKER,
BAR<=BAR] graph eliminates all attributes except DRINKER and
BAR, such as BEER. More generally remap can be used in this way
to implement the classical relational projection operation. (See [Korth and Silberschatz]
for a detailed discussion of the projection operator and other relational
algebra operators such as selection, rename, difference and joins.)""")
p("""The remap operation can also be used to implement "selection
on attribute equality". For example if we are interested
in the employee names of employees who are their own bosses we
can use the remapping graph""")
pre("""
R1 = [NAME<=NAME, NAME<=BOSS]
""")
p("""and reject substitutions where remapping using R1 is overdefined.
For example""")
pre("""
S1 = [NAME=>'joe', BOSS=>'joe']
S1.remap(R1) = [NAME=>'joe']
S2 = [NAME=>'fred', BOSS=>'joe']
S2.remap(R1) is overdefined.
""")
p("""The last remap is overdefined because the NAME attribute cannot
assume both the values 'fred' and 'joe' in a substitution.""")
p("""Furthermore, of course, the remap operation can be used to
"rename attributes" or "copy attribute values"
in substitutions. Note below that the missing attribute CAPACITY
in B is effectively ignored in the remapping operation.""")
pre("""
B.remap([D<=DRINKER, B<=BAR, B2<=BAR, C<=CAPACITY])
= [D=>'sam', B=>'cheers', B2=>'cheers']
""")
p("""More interestingly, a single remap operation can be used to
perform a combination of renaming, projection, value copying,
and attribute equality selection as one operation. In kjbuckets the remapper
graph is implemented using a kjbuckets.kjGraph and the remap operation
is an intrinsic method of kjbuckets.kjDict objects.""")
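p("""The following sketch (again assuming kjbuckets is available; the
(destination, source) pair ordering for the remapper graph and the
None result for an overdefined remap are assumptions of this
illustration) shows projection and the attribute-equality test from
the examples above:""")
pre("""
from kjbuckets import kjDict, kjGraph
E = kjDict([("DRINKER", "sam"), ("BAR", "cheers"), ("BEER", "mickies")])
R = kjGraph([("DRINKER", "DRINKER"), ("BAR", "BAR")])  # keep DRINKER, BAR
print E.remap(R)    # [DRINKER=>'sam', BAR=>'cheers']
R1 = kjGraph([("NAME", "NAME"), ("NAME", "BOSS")])     # NAME<=NAME, NAME<=BOSS
S2 = kjDict([("NAME", "fred"), ("BOSS", "joe")])
print S2.remap(R1)  # overdefined: NAME cannot be both 'fred' and 'joe'
""")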
header("Generalized Table Joins and the Evaluator Mainloop""")
p("""Strictly speaking the Gadfly 2.0 query evaluator only uses
the join and remap operations as its "basic assembly language"
-- all other computations, including inequality comparisons and
arithmetic, are implemented externally to the evaluator as "generalized
table joins." """)
p("""A table is a sequence of substitutions (which in keeping with
SQL semantics may contain redundant entries). The join between
two tables T1 and T2 is the sequence of all possible defined joins
between pairs of elements from the two tables. Procedurally we
might compute the join as""")
pre("""
T1JoinT2 = empty
for t1 in T1:
for t2 in T2:
if t1 join t2 is defined:
add t1 join t2 to T1joinT2""")
p("""In general circumstances this intuitive implementation is a
very inefficient way to compute the join, and Gadfly almost always
uses other methods, particularly since, as described below, a
"generalized table" can have an "infinite"
number of entries.""")
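p("""For finite tables, though, the intuitive method can be phrased
directly in Python over ordinary dictionaries -- a sketch for
exposition only, since the real engine works with kjbuckets
substitutions and indexed strategies instead:""")
pre("""
def naive_join(T1, T2):
    result = []
    for t1 in T1:
        for t2 in T2:
            consistent = 1
            for k in t1.keys():      # shared attributes must agree
                if k in t2 and t1[k] != t2[k]:
                    consistent = 0
            if consistent:
                joined = {}
                joined.update(t1)
                joined.update(t2)
                result.append(joined)
    return result
""")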
p("""For an example of a table join consider the EMPLOYEES table
containing""")
pre("""
[NAME=>'john', JOB=>'executive']
[NAME=>'sue', JOB=>'programmer']
[NAME=>'eric', JOB=>'peon']
[NAME=>'bill', JOB=>'peon']
""")
p("""and the ACTIVITIES table containing""")
pre("""
[JOB=>'peon', DOES=>'windows']
[JOB=>'peon', DOES=>'floors']
[JOB=>'programmer', DOES=>'coding']
[JOB=>'secretary', DOES=>'phone']""")
p("""then the join between EMPLOYEES and ACTIVITIES must containining""")
pre("""
[NAME=>'sue', JOB=>'programmer', DOES=>'coding']
[NAME=>'eric', JOB=>'peon', DOES=>'windows']
[NAME=>'bill', JOB=>'peon', DOES=>'windows']
[NAME=>'eric', JOB=>'peon', DOES=>'floors']
[NAME=>'bill', JOB=>'peon', DOES=>'floors']""")
p("""A compiled gadfly subquery ultimately appears to the evaluator
as a sequence of generalized tables that must be joined (in combination
with certain remapping operations that are beyond the scope of
this discussion). The Gadfly mainloop proceeds following the very
loose pseudocode:""")
pre("""
Subs = [ [] ] # the unary sequence containing "true"
While some table hasn't been chosen yet:
Choose an unchosen table with the least cost join estimate.
Subs = Subs joined with the chosen table
return Subs""")
p("""[Note that it is a property of the join operation that the
order in which the joins are carried out will not affect the result,
so the greedy strategy of evaluating the "cheapest join next"
will not affect the result. Also note that the treatment of logical
OR and NOT as well as EXIST, IN, UNION, and aggregation and so
forth are not discussed here, even though they do fit into this
approach.]""")
p("""The actual implementation is a bit more complex than this,
but the above outline may provide some useful intuition. The "cost
estimation" step and the implementation of the join operation
itself are left up to the generalized table object implementation.
A table implementation has the ability to give an "infinite"
cost estimate, which essentially means "don't join me in
yet under any circumstances." """)
header("Implementing Functions")
p("""As mentioned above operations such as arithmetic are implemented
using generalized tables. For example the arithmetic Add operation
is implemented in Gadfly internally as an "infinite generalized
table" containing all possible substitutions""")
pre("""
[ARG0=>a, ARG1=>b, RESULT=>a+b]
""")
p("""Where a and b are all possible values which can be summed.
Clearly, it is not possible to enumerate this table, but given
a sequence of substitutions with defined values for ARG0 and ARG1
such as""")
pre("""
[ARG0=>1, ARG1=>-4]
[ARG0=>2.6, ARG1=>50]
[ARG0=>99, ARG1=>1]
""")
p("""it is possible to implement a "join operation" against
this sequence that performs the same augmentation as a join with
the infinite table defined above:""")
pre("""
[ARG0=>1, ARG1=>-4, RESULT=>-3]
[ARG0=>2.6, ARG1=>50, RESULT=>52.6]
[ARG0=>99, ARG1=>1, RESULT=>100]
""")
p("""Furthermore by giving an "infinite estimate" for
all attempts to evaluate the join where ARG0 and ARG1 are not
available the generalized table implementation for the addition
operation can refuse to compute an "infinite join." """)
p("""More generally all functions f(a,b,c,d) are represented in
gadfly as generalized tables containing all possible relevant
entries""")
pre("""
[ARG0=>a, ARG1=>b, ARG2=>c, ARG3=>d, RESULT=>f(a,b,c,d)]""")
p("""and the join estimation function refuses all attempts to perform
a join unless all the arguments are provided by the input substitution
sequence.""")
header("Implementing Predicates")
p("""Similarly to functions, predicates such as less-than and BETWEEN
and LIKE are implemented using the generalized table mechanism.
For example the "x BETWEEN y AND z" predicate is implemented
as a generalized table "containing" all possible""")
pre("""
[ARG0=>a, ARG1=>b, ARG2=>c]""")
p("""where b<a<c. Furthermore joins with this table are not
permitted unless all three arguments are available in the sequence
of input substitutions.""")
header("Some Gadfly extension interfaces")
p("""A gadfly database engine may be extended with user defined
functions, predicates, and alternative table and index implementations.
This section snapshots several Gadfly 2.0 interfaces, currently under
development and likely to change before the package is released.""")
p("""The basic interface for adding functions and predicates (logical tests)
to a gadfly engine are relatively straightforward. For example to add the
ability to match a regular expression within a gadfly query use the
following implementation.""")
pre("""
from re import match
def addrematch(gadflyinstance):
gadflyinstance.add_predicate("rematch", match)
""")
p("""
Then upon connecting to the database execute
""")
pre("""
g = gadfly(...)
...
addrematch(g)
""")
p("""
In this case the "semijoin operation" associated with the new predicate
"rematch" is automatically generated, and after the add_predicate
binding operation the gadfly instance supports queries such as""")
pre("""
select drinker, beer
from likes
where rematch('b*', beer) and drinker not in
(select drinker from frequents where rematch('c*', bar))
""")
p("""
By embedding the "rematch" operation within the query the SQL
engine can do "more work" for the programmer and reduce or eliminate the
need to process the query result externally to the engine.
""")
p("""
In a similar manner functions may be added to a gadfly instance,""")
pre("""
def modulo(x,y):
return x % y
def addmodulo(gadflyinstance):
gadflyinstance.add_function("modulo", modulo)
...
g = gadfly(...)
...
addmodulo(g)
""")
p("""
Then after the binding the modulo function can be used wherever
an SQL expression can occur.
""")
p("""
Adding alternative table implementations to a Gadfly instance
is more interesting and more difficult. An "extension table" implementation
must conform to the following interface:""")
pre("""
# get the kjbuckets.kjSet set of attribute names for this table
names = table.attributes()
# estimate the difficulty of evaluating a join given known attributes
# return None for "impossible" or n>=0 otherwise with larger values
# indicating greater difficulty or expense
estimate = table.estimate(known_attributes)
# return the join of the rows of the table with
# the list of kjbuckets.kjDict mappings as a list of mappings.
resultmappings = table.join(listofmappings)
""")
p("""
In this case add the table to a gadfly instance using""")
pre("""
gadflyinstance.add_table("table_name", table)
""")
p("""
For example to add a table which automatically queries filenames
in the filesystems of the host computer a gadfly instance could
be augmented with a GLOB table implemented using the standard
library function glob.glob as follows:""")
pre("""
import kjbuckets
class GlobTable:
def __init__(self): pass
def attributes(self):
return kjbuckets.kjSet("PATTERN", "NAME")
def estimate(self, known_attributes):
if known_attributes.member("PATTERN"):
return 66 # join not too difficult
else:
return None # join is impossible (must have PATTERN)
def join(self, listofmappings):
from glob import glob
result = []
for m in listofmappings:
pattern = m["PATTERN"]
for name in glob(pattern):
newmapping = kjbuckets.kjDict(m)
newmapping["NAME"] = name
if newmapping.Clean():
result.append(newmapping)
return result
...
gadfly_instance.add_table("GLOB", GlobTable())
""")
p("""
Then one could formulate queries such as "list the files in directories
associated with packages installed by guido"
""")
pre("""
select g.name as filename
from packages p, glob g
where p.installer = 'guido' and g.pattern=p.root_directory
""")
p("""
Note that conceptually the GLOB table is an infinite table including
all filenames on the current computer in the "NAME" column, paired with
a potentially infinite number of patterns.
""")
p("""
More interesting examples would allow queries to remotely access
data served by an HTTP server, or from any other resource.
""")
p("""
Furthermore an extension table can be augmented with update methods
""")
pre("""
table.insert_rows(listofmappings)
table.update_rows(oldlist, newlist)
table.delete_rows(oldlist)
""")
p("""
Note: at present the implementation does not enforce recovery or
transaction semantics for updates to extension tables, although this
may change in the final release.
""")
p("""
The table implementation is free to provide its own implementations of
indices which take advantage of data provided by the join argument.
""")
header("Efficiency Notes")
p("""The following thought experiment attempts to explain why the
Gadfly implementation is surprisingly fast considering that it
is almost entirely implemented in Python (an interpreted programming
language which is not especially fast when compared to alternatives).
Although Gadfly is quite complex, at an abstract level the process
of query evaluation boils down to a series of embedded loops.
Consider the following nested loops:""")
pre("""
iterate 1000:
f(...) # fixed cost of outer loop
iterate 10:
g(...) # fixed cost of middle loop
iterate 10:
# the real work (string parse, matrix mul, query eval...)
h(...)""")
p("""In my experience many computations follow this pattern where
f, g, are complex, dynamic, special purpose and h is simple, general
purpose, static. Some example computations that follow this pattern
include: file massaging (perl), matrix manipulation (python, tcl),
database/cgi page generation, and vector graphics/imaging.""")
p("""Suppose implementing f, g, h in python is easy but result in
execution times10 times slower than a much harder implementation
in C, choosing arbitrary and debatable numbers assume each function
call consumes 1 tick in C, 5 ticks in java, 10 ticks in python
for a straightforward implementation of each function f, g, and
h. Under these conditions we get the following cost analysis,
eliminating some uninteresting combinations, of implementing the
function f, g, and h in combinations of Python, C and java:""")
pre("""
COST | FLANG | GLANG | HLANG
==================================
111000 | C | C | C
115000 | java | C | C
120000 | python | C | C
155000 | java | java | C
210000 | python | python | C
555000 | java | java | java
560000 | python | java | java
610000 | python | python | java
1110000 | python | python | python
""")
p("""Note that moving only the innermost loop to C (python/python/C)
speeds up the calculation by half an order of magnitude compared
to the python-only implementation and brings the speed to within
a factor of 2 of an implementation done entirely in C.""")
p("""Although this artificial and contrived thought experiment is
far from conclusive, we may be tempted to draw the conclusion
that generally programmers should focus first on obtaining a working
implementation (because as John Ousterhout is reported to have
said "the biggest performance improvement is the transition
from non-working to working") using the methodology that
is most likely to obtain a working solution the quickest (Python). Only then,
if the performance is inadequate, should the programmer focus on optimizing
the inner most loops, perhaps moving them to a very efficient
implementation (C). Optimizing the outer loops will buy little
improvement, and should be done later, if ever.""")
p("""This was precisely the strategy behind the gadfly implementations,
where most of the inner loops are implemented in the kjbuckets
C extension module and the higher level logic is all in Python.
This also explains why gadfly appears to be "slower"
for simple queries over small data sets, but seems to be relatively
"faster" for more complex queries over larger data sets,
since larger queries and data sets take better advantage of the
optimized inner loops.""")
header("A Gadfly variant for OLAP?")
p("""In private correspondence Andy Robinson points out that the
basic logical design underlying Gadfly could be adapted to provide
Online Analytical Processing (OLAP) and other forms of data warehousing
and data mining. Since SQL is not particularly well suited for
the kinds of requests common in these domains the higher level
interfaces would require modification, but the underlying logic
of substitutions and name mappings seems to be appropriate.""")
header("Conclusion")
p("""The revamped query engine design in Gadfly 2 supports
a flexible and general extension methodology that permits programmers
to extend the gadfly engine to include additional computations
and access to remote data sources. Among other possibilities this
will permit the gadfly engine to make use of disk based indexed
tables and to dynamically retrieve information from remote data
sources (such as an Excel spreadsheet or an Oracle database).
These features will make gadfly a very useful tool for data manipulation
and integration.""")
header("References")
p("""[Van Rossum] Van Rossum, Python Reference Manual, Tutorial, and Library Manuals,
please look to http://www.python.org
for the latest versions, downloads and links to printed versions.""")
p("""[O'Neill] O'Neill, P., Data Base Principles, Programming, Performance,
Morgan Kaufmann Publishers, San Francisco, 1994.""")
p("""[Korth and Silberschatz] Korth, H. and Silberschatz, A. and Sudarshan, S.
Data Base System Concepts, McGraw-Hill Series in Computer Science, Boston,
1997""")
p("""[Gadfly]Gadfly: SQL Relational Database in Python,
http://www.chordate.com/kwParsing/gadfly.html""")
go()
| mit |
Plantain/sms-mailinglist | lib/googlecloudapis/resourceviews/v1beta1/resourceviews_v1beta1_messages.py | 5 | 15478 | """Generated message classes for resourceviews version v1beta1.
The Resource View API allows users to create and manage logical sets of Google
Compute Engine instances.
"""
from protorpc import messages
package = 'resourceviews'
class Label(messages.Message):
"""The Label to be applied to the resource views.
Fields:
key: Key of the label.
value: Value of the label.
"""
key = messages.StringField(1)
value = messages.StringField(2)
class RegionViewsAddResourcesRequest(messages.Message):
"""The request to add resources to the resource view.
Fields:
resources: The list of resources to be added.
"""
resources = messages.StringField(1, repeated=True)
class RegionViewsInsertResponse(messages.Message):
"""The response to a resource view insert request.
Fields:
resource: The resource view object inserted.
"""
resource = messages.MessageField('ResourceView', 1)
class RegionViewsListResourcesResponse(messages.Message):
"""The response to the list resource request.
Fields:
members: The resources in the view.
nextPageToken: A token used for pagination.
"""
members = messages.StringField(1, repeated=True)
nextPageToken = messages.StringField(2)
class RegionViewsListResponse(messages.Message):
"""The response to the list resource view request.
Fields:
nextPageToken: A token used for pagination.
resourceViews: The list of resource views that meet the criteria.
"""
nextPageToken = messages.StringField(1)
resourceViews = messages.MessageField('ResourceView', 2, repeated=True)
class RegionViewsRemoveResourcesRequest(messages.Message):
"""The request to remove resources from the resource view.
Fields:
resources: The list of resources to be removed.
"""
resources = messages.StringField(1, repeated=True)
class ResourceView(messages.Message):
"""The resource view object.
Fields:
creationTime: The creation time of the resource view.
description: The detailed description of the resource view.
id: [Output Only] The ID of the resource view.
kind: Type of the resource.
labels: The labels for events.
lastModified: The last modified time of the view. Not supported yet.
members: A list of all resources in the resource view.
name: The name of the resource view.
numMembers: The total number of resources in the resource view.
selfLink: [Output Only] A self-link to the resource view.
"""
creationTime = messages.StringField(1)
description = messages.StringField(2)
id = messages.StringField(3)
kind = messages.StringField(4, default=u'resourceviews#resourceView')
labels = messages.MessageField('Label', 5, repeated=True)
lastModified = messages.StringField(6)
members = messages.StringField(7, repeated=True)
name = messages.StringField(8)
numMembers = messages.IntegerField(9, variant=messages.Variant.UINT32)
selfLink = messages.StringField(10)
class ResourceviewsRegionViewsAddresourcesRequest(messages.Message):
"""A ResourceviewsRegionViewsAddresourcesRequest object.
Fields:
projectName: The project name of the resource view.
region: The region name of the resource view.
regionViewsAddResourcesRequest: A RegionViewsAddResourcesRequest resource
to be passed as the request body.
resourceViewName: The name of the resource view.
"""
projectName = messages.StringField(1, required=True)
region = messages.StringField(2, required=True)
regionViewsAddResourcesRequest = messages.MessageField('RegionViewsAddResourcesRequest', 3)
resourceViewName = messages.StringField(4, required=True)
class ResourceviewsRegionViewsAddresourcesResponse(messages.Message):
"""An empty ResourceviewsRegionViewsAddresources response."""
class ResourceviewsRegionViewsDeleteRequest(messages.Message):
"""A ResourceviewsRegionViewsDeleteRequest object.
Fields:
projectName: The project name of the resource view.
region: The region name of the resource view.
resourceViewName: The name of the resource view.
"""
projectName = messages.StringField(1, required=True)
region = messages.StringField(2, required=True)
resourceViewName = messages.StringField(3, required=True)
class ResourceviewsRegionViewsDeleteResponse(messages.Message):
"""An empty ResourceviewsRegionViewsDelete response."""
class ResourceviewsRegionViewsGetRequest(messages.Message):
"""A ResourceviewsRegionViewsGetRequest object.
Fields:
projectName: The project name of the resource view.
region: The region name of the resource view.
resourceViewName: The name of the resource view.
"""
projectName = messages.StringField(1, required=True)
region = messages.StringField(2, required=True)
resourceViewName = messages.StringField(3, required=True)
class ResourceviewsRegionViewsInsertRequest(messages.Message):
"""A ResourceviewsRegionViewsInsertRequest object.
Fields:
projectName: The project name of the resource view.
region: The region name of the resource view.
resourceView: A ResourceView resource to be passed as the request body.
"""
projectName = messages.StringField(1, required=True)
region = messages.StringField(2, required=True)
resourceView = messages.MessageField('ResourceView', 3)
class ResourceviewsRegionViewsListRequest(messages.Message):
"""A ResourceviewsRegionViewsListRequest object.
Fields:
maxResults: Maximum count of results to be returned. Acceptable values are
0 to 5000, inclusive. (Default: 5000)
pageToken: Specifies a nextPageToken returned by a previous list request.
This token can be used to request the next page of results from a
previous list request.
projectName: The project name of the resource view.
region: The region name of the resource view.
"""
maxResults = messages.IntegerField(1, variant=messages.Variant.INT32, default=5000)
pageToken = messages.StringField(2)
projectName = messages.StringField(3, required=True)
region = messages.StringField(4, required=True)
class ResourceviewsRegionViewsListresourcesRequest(messages.Message):
"""A ResourceviewsRegionViewsListresourcesRequest object.
Fields:
maxResults: Maximum count of results to be returned. Acceptable values are
0 to 5000, inclusive. (Default: 5000)
pageToken: Specifies a nextPageToken returned by a previous list request.
This token can be used to request the next page of results from a
previous list request.
projectName: The project name of the resource view.
region: The region name of the resource view.
resourceViewName: The name of the resource view.
"""
maxResults = messages.IntegerField(1, variant=messages.Variant.INT32, default=5000)
pageToken = messages.StringField(2)
projectName = messages.StringField(3, required=True)
region = messages.StringField(4, required=True)
resourceViewName = messages.StringField(5, required=True)
class ResourceviewsRegionViewsRemoveresourcesRequest(messages.Message):
"""A ResourceviewsRegionViewsRemoveresourcesRequest object.
Fields:
projectName: The project name of the resource view.
region: The region name of the resource view.
regionViewsRemoveResourcesRequest: A RegionViewsRemoveResourcesRequest
resource to be passed as the request body.
resourceViewName: The name of the resource view.
"""
projectName = messages.StringField(1, required=True)
region = messages.StringField(2, required=True)
regionViewsRemoveResourcesRequest = messages.MessageField('RegionViewsRemoveResourcesRequest', 3)
resourceViewName = messages.StringField(4, required=True)
class ResourceviewsRegionViewsRemoveresourcesResponse(messages.Message):
"""An empty ResourceviewsRegionViewsRemoveresources response."""
class ResourceviewsZoneViewsAddresourcesRequest(messages.Message):
"""A ResourceviewsZoneViewsAddresourcesRequest object.
Fields:
projectName: The project name of the resource view.
resourceViewName: The name of the resource view.
zone: The zone name of the resource view.
zoneViewsAddResourcesRequest: A ZoneViewsAddResourcesRequest resource to
be passed as the request body.
"""
projectName = messages.StringField(1, required=True)
resourceViewName = messages.StringField(2, required=True)
zone = messages.StringField(3, required=True)
zoneViewsAddResourcesRequest = messages.MessageField('ZoneViewsAddResourcesRequest', 4)
class ResourceviewsZoneViewsAddresourcesResponse(messages.Message):
"""An empty ResourceviewsZoneViewsAddresources response."""
class ResourceviewsZoneViewsDeleteRequest(messages.Message):
"""A ResourceviewsZoneViewsDeleteRequest object.
Fields:
projectName: The project name of the resource view.
resourceViewName: The name of the resource view.
zone: The zone name of the resource view.
"""
projectName = messages.StringField(1, required=True)
resourceViewName = messages.StringField(2, required=True)
zone = messages.StringField(3, required=True)
class ResourceviewsZoneViewsDeleteResponse(messages.Message):
"""An empty ResourceviewsZoneViewsDelete response."""
class ResourceviewsZoneViewsGetRequest(messages.Message):
"""A ResourceviewsZoneViewsGetRequest object.
Fields:
projectName: The project name of the resource view.
resourceViewName: The name of the resource view.
zone: The zone name of the resource view.
"""
projectName = messages.StringField(1, required=True)
resourceViewName = messages.StringField(2, required=True)
zone = messages.StringField(3, required=True)
class ResourceviewsZoneViewsInsertRequest(messages.Message):
"""A ResourceviewsZoneViewsInsertRequest object.
Fields:
projectName: The project name of the resource view.
resourceView: A ResourceView resource to be passed as the request body.
zone: The zone name of the resource view.
"""
projectName = messages.StringField(1, required=True)
resourceView = messages.MessageField('ResourceView', 2)
zone = messages.StringField(3, required=True)
class ResourceviewsZoneViewsListRequest(messages.Message):
"""A ResourceviewsZoneViewsListRequest object.
Fields:
maxResults: Maximum count of results to be returned. Acceptable values are
0 to 5000, inclusive. (Default: 5000)
pageToken: Specifies a nextPageToken returned by a previous list request.
This token can be used to request the next page of results from a
previous list request.
projectName: The project name of the resource view.
zone: The zone name of the resource view.
"""
maxResults = messages.IntegerField(1, variant=messages.Variant.INT32, default=5000)
pageToken = messages.StringField(2)
projectName = messages.StringField(3, required=True)
zone = messages.StringField(4, required=True)
class ResourceviewsZoneViewsListresourcesRequest(messages.Message):
"""A ResourceviewsZoneViewsListresourcesRequest object.
Fields:
maxResults: Maximum count of results to be returned. Acceptable values are
0 to 5000, inclusive. (Default: 5000)
pageToken: Specifies a nextPageToken returned by a previous list request.
This token can be used to request the next page of results from a
previous list request.
projectName: The project name of the resource view.
resourceViewName: The name of the resource view.
zone: The zone name of the resource view.
"""
maxResults = messages.IntegerField(1, variant=messages.Variant.INT32, default=5000)
pageToken = messages.StringField(2)
projectName = messages.StringField(3, required=True)
resourceViewName = messages.StringField(4, required=True)
zone = messages.StringField(5, required=True)
class ResourceviewsZoneViewsRemoveresourcesRequest(messages.Message):
"""A ResourceviewsZoneViewsRemoveresourcesRequest object.
Fields:
projectName: The project name of the resource view.
resourceViewName: The name of the resource view.
zone: The zone name of the resource view.
zoneViewsRemoveResourcesRequest: A ZoneViewsRemoveResourcesRequest
resource to be passed as the request body.
"""
projectName = messages.StringField(1, required=True)
resourceViewName = messages.StringField(2, required=True)
zone = messages.StringField(3, required=True)
zoneViewsRemoveResourcesRequest = messages.MessageField('ZoneViewsRemoveResourcesRequest', 4)
class ResourceviewsZoneViewsRemoveresourcesResponse(messages.Message):
"""An empty ResourceviewsZoneViewsRemoveresources response."""
class StandardQueryParameters(messages.Message):
"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" or "email:<ldap>" to
include in api requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(messages.Enum):
"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = messages.StringField(2)
key = messages.StringField(3)
oauth_token = messages.StringField(4)
prettyPrint = messages.BooleanField(5, default=True)
quotaUser = messages.StringField(6)
trace = messages.StringField(7)
userIp = messages.StringField(8)
class ZoneViewsAddResourcesRequest(messages.Message):
"""The request to add resources to the resource view.
Fields:
resources: The list of resources to be added.
"""
resources = messages.StringField(1, repeated=True)
class ZoneViewsInsertResponse(messages.Message):
"""The response to an insert request.
Fields:
resource: The resource view object that has been inserted.
"""
resource = messages.MessageField('ResourceView', 1)
class ZoneViewsListResourcesResponse(messages.Message):
"""The response to a list resource request.
Fields:
members: The full URL of resources in the view.
nextPageToken: A token used for pagination.
"""
members = messages.StringField(1, repeated=True)
nextPageToken = messages.StringField(2)
class ZoneViewsListResponse(messages.Message):
"""The response to a list request.
Fields:
nextPageToken: A token used for pagination.
resourceViews: The result that contains all resource views that meet the
criteria.
"""
nextPageToken = messages.StringField(1)
resourceViews = messages.MessageField('ResourceView', 2, repeated=True)
class ZoneViewsRemoveResourcesRequest(messages.Message):
"""The request to remove resources from the resource view.
Fields:
resources: The list of resources to be removed.
"""
resources = messages.StringField(1, repeated=True)
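# Illustrative sketch (not part of the generated module): constructing an
# insert request from the message classes above; the project and zone
# names are placeholders.
#
#   view = ResourceView(name='my-view', description='example view')
#   request = ResourceviewsZoneViewsInsertRequest(
#       projectName='my-project', zone='us-central1-a', resourceView=view)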
| apache-2.0 |
John-NY/overo-oe | contrib/opie/opie_checksum_rewrite.py | 26 | 2820 | #!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# Opie recipe checksum rewriter
#
# A crude script for rewriting recipes to contain checksum information
#
# Some portions copied from oe-source-checker.py, copyright (C) 2007 OpenedHand
import os
import sys
def rewrite(recpfilename, sourcedir):
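    # Scan the recipe line by line: resolve a 'require'd include to find
    # APPNAME, tag each tarball in SRC_URI with a name= parameter, checksum
    # the matching file under sourcedir, and append the collected
    # SRC_URI[<name>.md5sum] / SRC_URI[<name>.sha256sum] lines.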
insrc = False
srcfirst = False
sums = ''
appname = ''
output = ''
f = open(recpfilename, 'r')
for line in f:
if line.startswith('require '):
pn = os.path.basename(recpfilename)
pn = pn[0:pn.find("_")]
incfilename = line[8:].strip().replace("${PN}", pn)
f2 = open(os.path.join(os.path.dirname(recpfilename), incfilename))
for line2 in f2:
if line2.startswith('APPNAME '):
appname = line2[line2.find('"'):].strip('\n\r"')
f2.close()
output = output + line
continue
if line.startswith('SRC_URI['):
continue
if line.startswith('APPNAME '):
appname = line[line.find('"'):].strip('\n\r"')
output = output + line
continue
if not insrc and line.startswith('SRC_URI '):
insrc = True
srcfirst = True
if insrc:
pos = line.find('-split_')
pos2 = line.find('.tar.bz2')
if pos > -1 and pos2 > -1:
name = line[pos+1:pos2]
name = name.replace('${APPNAME}', 'appname')
output = output + line.replace('.tar.bz2', '.tar.bz2;name=%s' % name)
filename = line.strip('\n\r\t "\\').replace('${APPNAME}', appname)
if srcfirst:
filename = filename[filename.find('"')+1:]
filename = filename.replace('http://sources.openembedded.org/', '')
localpath = os.path.join(sourcedir, filename)
if not os.path.isfile(localpath):
raise IOError("file %s not found" % localpath)
md5pipe = os.popen('md5sum ' + localpath)
md5data = (md5pipe.readline().split() or [ "" ])[0]
md5pipe.close()
shapipe = os.popen('sha256sum ' + localpath)
shadata = (shapipe.readline().split() or [ "" ])[0]
shapipe.close()
sums = sums + 'SRC_URI[%s.md5sum] = "%s"\n' % (name, md5data)
sums = sums + 'SRC_URI[%s.sha256sum] = "%s"\n' % (name, shadata)
else:
output = output + line
if (srcfirst and line.count('"') > 1) or (not srcfirst and line.find('"') > -1):
insrc = False
if sums:
output = output + sums
srcfirst = False
else:
output = output + line
f.close()
f = open(recpfilename, 'w')
f.write(output)
f.close()
if len(sys.argv) < 3:
print """syntax: %s recipe dl_dir
recipe - recipe.bb file
dl_dir - location of local source files""" % sys.argv[0]
sys.exit(1)
recipe = sys.argv[1]
dl_dir = sys.argv[2]
if not os.path.isfile(recipe):
print >> sys.stderr, "%s: recipe file %s not found" % recipe
sys.exit(1)
if not os.path.isdir(dl_dir):
print >> sys.stderr, "%s: source dir %s not found" % dl_dir
sys.exit(1)
rewrite(recipe, dl_dir)
| mit |
ArcherSys/ArcherSys | entertainment/gheddobox/Lib/site-packages/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
_builtin_compile = compile
try:
from platform import python_implementation
except ImportError:
if os.name == "java":
# Jython 2.5 has ast module, but not platform.python_implementation() function.
def python_implementation():
return "Jython"
else:
raise
# restricted set of variables
_VARS = {'sys.platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# FIXME parsing sys.platform is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os.name': os.name,
'platform.version': platform.version(),
'platform.machine': platform.machine(),
'platform.python_implementation': python_implementation(),
'extra': None # wheel extension
}
for var in list(_VARS.keys()):
if '.' in var:
_VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
"""Return copy of default PEP 385 globals dictionary."""
return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
def __init__(self, statement):
self.statement = statement # for error messages
ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
# Bool operations
ALLOWED += (ast.And, ast.Or)
# Comparison operations
ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)
def visit(self, node):
"""Ensure statement only contains allowed nodes."""
if not isinstance(node, self.ALLOWED):
raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
(self.statement,
(' ' * node.col_offset) + '^'))
return ast.NodeTransformer.visit(self, node)
def visit_Attribute(self, node):
"""Flatten one level of attribute access."""
new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
return ast.copy_location(new_node, node)
def parse_marker(marker):
tree = ast.parse(marker, mode='eval')
new_tree = ASTWhitelist(marker).generic_visit(tree)
return new_tree
def compile_marker(parsed_marker):
return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
dont_inherit=True)
_cache = weakref.WeakValueDictionary()
def compile(marker):
"""Return compiled marker as a function accepting an environment dict."""
try:
return _cache[marker]
except KeyError:
pass
if not marker.strip():
def marker_fn(environment=None, override=None):
""""""
return True
else:
compiled_marker = compile_marker(parse_marker(marker))
def marker_fn(environment=None, override=None):
"""override updates environment"""
if override is None:
override = {}
if environment is None:
environment = default_environment()
environment.update(override)
return eval(compiled_marker, environment)
marker_fn.__doc__ = marker
_cache[marker] = marker_fn
return _cache[marker]
def interpret(marker, environment=None):
return compile(marker)(environment)
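# Example usage (illustrative; results depend on the running interpreter):
#
#   interpret("python_version >= '2.6'")             # True on 2.6 and later
#   interpret("os.name == 'posix' and extra is None")
#   interpret("extra == 'test'", {'extra': 'test'})  # True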
| mit |
chrisidefix/devide.johannes | extra/soappy-cvp/bid/inventoryClient.py | 8 | 9035 | #!/usr/bin/env python
import getopt
import sys
import string
import re
import time
sys.path.insert(1,"..")
from SOAPpy import SOAP
import traceback
DEFAULT_SERVERS_FILE = './inventory.servers'
DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping')
def usage (error = None):
sys.stdout = sys.stderr
if error != None:
print error
print """usage: %s [options] [server ...]
If a long option shows an argument is mandatory, it's mandatory for the
equivalent short option also.
-?, --help display this usage
-d, --debug turn on debugging in the SOAP library
-i, --invert test servers *not* in the list of servers given
-m, --method=METHOD#[,METHOD#...]
call only the given methods, specify a METHOD# of ?
for the list of method numbers
-o, --output=TYPE turn on output, TYPE is one or more of s(uccess),
f(ailure), n(ot implemented), F(ailed (as expected)),
a(ll)
[f]
-s, --servers=FILE use FILE as list of servers to test [%s]
-t, --stacktrace print a stack trace on each unexpected failure
-T, --always-stacktrace
print a stack trace on any failure
""" % (sys.argv[0], DEFAULT_SERVERS_FILE),
sys.exit (0)
def methodUsage ():
sys.stdout = sys.stderr
print "Methods are specified by number. Multiple methods can be " \
"specified using a\ncomma-separated list of numbers or ranges. " \
"For example 1,4-6,8 specifies\nmethods 1, 4, 5, 6, and 8.\n"
print "The available methods are:\n"
half = (len (DEFAULT_METHODS) + 1) / 2
for i in range (half):
print "%4d. %-25s" % (i + 1, DEFAULT_METHODS[i]),
if i + half < len (DEFAULT_METHODS):
print "%4d. %-25s" % (i + 1 + half, DEFAULT_METHODS[i + half]),
print
sys.exit (0)
def readServers (file):
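    # Parse a simple records file: blank lines separate servers, each
    # "tag: value" line starts a field, indented lines continue the
    # previous field, and 'nonfunctional' entries collect per-method notes.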
servers = []
f = open (file, 'r')
while 1:
line = f.readline ()
if line == '':
break
if line[0] in ('#', '\n') or line[0] in string.whitespace:
continue
cur = {'nonfunctional': {}}
tag = None
servers.append (cur)
while 1:
if line[0] in string.whitespace:
if tag == 'nonfunctional':
value = method + ' ' + cur[tag][method]
else:
value = cur[tag]
value += ' ' + line.strip ()
else:
tag, value = line.split (':', 1)
tag = tag.strip ().lower ()
value = value.strip ()
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
if tag == 'nonfunctional':
value = value.split (' ', 1) + ['']
method = value[0]
cur[tag][method] = value[1]
else:
cur[tag] = value
line = f.readline ()
if line == '' or line[0] == '\n':
break
return servers
def str2list (s):
l = {}
    for i in s.split (','):
        if i.find ('-') != -1:
            lo, hi = i.split ('-')
            for j in range (int (lo), int (hi) + 1):
                l[j] = 1
        else:
            l[int (i)] = 1
l = l.keys ()
l.sort ()
return l
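# Example (editor's note): str2list('1,4-6,8') returns [1, 4, 5, 6, 8],
# matching the range syntax documented in methodUsage above.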
def SimpleBuy(serv, sa, epname):
serv = serv._sa (sa % {'methodname':'SimpleBuy'})
return serv.SimpleBuy(ProductName="widget", Quantity = 50, Address = "this is my address") #JHawk, Phalanx require this order of params
def RequestForQuote(serv, sa, epname):
serv = serv._sa (sa % {'methodname':'RequestForQuote'})
return serv.RequestForQuote(Quantity=3, ProductName = "thing") # for Phalanx, JHawk
def Buy(serv, sa, epname):
import copy
serv = serv._sa (sa % {'methodname':'Buy'})
billTo_d = {"name":"Buyer One", "address":"1 1st Street",
"city":"New York", "state":"NY", "zipCode":"10000"}
shipTo_d = {"name":"Buyer One ", "address":"1 1st Street ",
"city":"New York ", "state":"NY ", "zipCode":"10000 "}
for k,v in shipTo_d.items():
shipTo_d[k] = v[:-1]
itemd1 = SOAP.structType( {"name":"widg1","quantity":200,"price":SOAP.decimalType(45.99), "_typename":"LineItem"})
itemd2 = SOAP.structType( {"name":"widg2","quantity":400,"price":SOAP.decimalType(33.45), "_typename":"LineItem"})
items_d = SOAP.arrayType( [itemd1, itemd2] )
items_d._ns = "http://www.soapinterop.org/Bid"
po_d = SOAP.structType( data = {"poID":"myord","createDate":SOAP.dateTimeType(),"shipTo":shipTo_d, "billTo":billTo_d, "items":items_d})
try:
# it's called PO by MST (MS SOAP Toolkit), JHawk (.NET Remoting),
# Idoox WASP, Paul (SOAP::Lite), PranishK (ATL), GLUE, Aumsoft,
# HP, EasySoap, and Jake (Frontier). [Actzero accepts either]
return serv.Buy(PO=po_d)
except:
# called PurchaseOrder by KeithBa
return serv.Buy(PurchaseOrder=po_d)
def Ping(serv, sa, epname):
serv = serv._sa (sa % {'methodname':'Ping'})
return serv.Ping()
def main():
servers = DEFAULT_SERVERS_FILE
methodnums = None
output = 'f'
invert = 0
succeed = 0
printtrace = 0
stats = 1
total = 0
fail = 0
failok = 0
notimp = 0
try:
        opts,args = getopt.getopt (sys.argv[1:], '?dm:io:s:tT',
            ['help', 'method=', 'debug', 'invert',
                'output=', 'servers=', 'stacktrace', 'always-stacktrace'])
for opt, arg in opts:
if opt in ('-?', '--help'):
usage ()
elif opt in ('-d', '--debug'):
SOAP.Config.debug = 1
elif opt in ('-i', '--invert'):
invert = 1
elif opt in ('-m', '--method'):
if arg == '?':
methodUsage ()
methodnums = str2list (arg)
elif opt in ('-o', '--output'):
output = arg
            elif opt in ('-s', '--servers'):
                servers = arg
            elif opt in ('-t', '--stacktrace'):
                printtrace = 1
            elif opt in ('-T', '--always-stacktrace'):
                printtrace = 2
else:
raise AttributeError, \
"Recognized but unimplemented option `%s'" % opt
except SystemExit:
raise
except:
usage (sys.exc_info ()[1])
if 'a' in output:
output = 'fFns'
servers = readServers(servers)
if methodnums == None:
methodnums = range (1, len (DEFAULT_METHODS) + 1)
limitre = re.compile ('|'.join (args), re.IGNORECASE)
for s in servers:
if (not not limitre.match (s['name'])) == invert:
continue
serv = SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace'])
for num in (methodnums):
if num > len(DEFAULT_METHODS):
break
total += 1
name = DEFAULT_METHODS[num - 1]
title = '%s: %s (#%d)' % (s['name'], name, num)
try:
fn = globals ()[name]
except KeyboardInterrupt:
raise
except:
if 'n' in output:
print title, "test not yet implemented"
notimp += 1
continue
try:
res = fn (serv, s['soapaction'], s['name'])
if s['nonfunctional'].has_key (name):
print title, "succeeded despite marked nonfunctional"
elif 's' in output:
print title, "succeeded "
succeed += 1
except KeyboardInterrupt:
print "fail"
raise
except:
if s['nonfunctional'].has_key (name):
if 'F' in output:
t = 'as expected'
if s['nonfunctional'][name] != '':
t += ', ' + s['nonfunctional'][name]
print title, "failed (%s) -" %t, sys.exc_info()[1]
failok += 1
else:
if 'f' in output:
print title, "failed -", str (sys.exc_info()[1])
fail += 1
if stats:
print " Tests ended at:", time.ctime (time.time())
if stats > 0:
print " Total tests: %d" % total
print " Successes: %d (%3.2f%%)" % \
(succeed, 100.0 * succeed / total)
if stats > 0 or fail > 0:
print "Failed unexpectedly: %d (%3.2f%%)" % \
(fail, 100.0 * fail / total)
if stats > 0:
print " Failed as expected: %d (%3.2f%%)" % \
(failok, 100.0 * failok / total)
if stats > 0 or notimp > 0:
print " Not implemented: %d (%3.2f%%)" % \
(notimp, 100.0 * notimp / total)
return fail + notimp
if __name__ == "__main__":
main()
| bsd-3-clause |
sjthespian/ISYlib-python | ISY/IsyNodeClass.py | 2 | 21244 | """
Devices controlled by the ISY are represented as "nodes" on the ISY device and by Node Objects in the API
There are three types of Node Object:
* IsyNode - Node Object
Represents lights, switches, motion sensors
* IsyScene - Scene Object
Represents Scenes; contains the Nodes that comprise a "Scene"
* IsyNodeFolder - can hold Scenes or Nodes
an organizational object for Scenes and Nodes
Only IsyNode Objects maintain "state"
Which states are maintained depends on the physical node device itself
but they can include
- on, off or dim level
- temperature
- wattage
Nodes can have "members" or subnodes
IsyScene Objects can take commands but do not maintain a queryable state
A Scene is a predefined state for one or more nodes
scenes can only be comprised of nodes, which are called "members"
only nodes can be members of a scene
IsyNodeFolders are just for organizing
Nodes, Scenes and Folders can be members of a Folder
"""
__author__ = 'Peter Shipley <[email protected]>'
__copyright__ = "Copyright (C) 2015 Peter Shipley"
__license__ = "BSD"
import hashlib
from ISY.IsyUtilClass import IsySubClass, val2bool
from ISY.IsyExceptionClass import *
# from IsyClass import *
# from IsyNodeClass import *
# from IsyProgramClass import *
# from IsyVarClass import *
__all__ = ['IsyNode', 'IsyNodeFolder', 'IsyScene']
# library_using_super
class _IsyNodeBase(IsySubClass):
#_objtype = (0, "unknown")
_objtype = "unknown"
def on(self, val=255) :
""" Send On command to a node
args:
optional value for on level
"""
self._on(val, "DON")
def faston(self, val=255) :
""" Send Fast On command to a node
args:
optional value for on level
"""
self._on(val, "DFON")
def _on(self, val, cmd) :
        if not str(val).isdigit() :
            raise IsyTypeError("On Command : Bad Value : node=%s val=%s" %
                (self._mydict["address"], str(val)))
        if "property" in self._mydict :
            if "ST" in self._mydict["property"] :
                self._mydict["property"]["ST"]["value"] = val
                self._mydict["property"]["ST"]["formatted"] = "{:.0%}".format(val / 255.0)
self.isy._node_send(self._mydict["address"], "cmd", cmd, val)
def off(self) :
""" Send Off command to a node
args: None
"""
self._off("DOF")
def fastoff(self) :
""" Send Fast Off command to a node
args: None
"""
self._off("DFOF")
def _off(self, cmd="DOF") :
self.isy._node_send(self._mydict["address"], "cmd", cmd)
if "property" in self._mydict :
# self._mydict["property"]["time"] = 0
if "ST" in self._mydict["property"] :
self._mydict["property"]["ST"]["value"] = 0
self._mydict["property"]["ST"]["formatted"] = "Off"
def beep(self) :
self.isy._node_send(self._mydict["address"], "cmd", "BEEP")
def get_spoken(self):
""" get notes property 'spoken' """
return self._get_prop("spoken")
spoken = property(get_spoken)
def get_path(self):
return self.isy._node_get_path(self._mydict['address'], self._objtype)
path = property(get_path)
def members_list(self) :
pass
def member_iter(self, flag=0):
return self.members_list()
def member_list(self):
if 'members' in self._mydict :
# print("mydict['members'] : ", type(self._mydict['members']) )
            if isinstance(self._mydict['members'], dict) :
return self._mydict['members'].keys()
# if type(self._mydict['members']) == 'list' :
return self._mydict['members'][:]
return [ ]
def is_dimable(self) :
if 'type' in self._mydict :
a = self._mydict["type"].split('.')
if a[0] == "1" :
return True
return False
dimable = property(is_dimable)
def get_callback(self) :
return self.isy.callback_get(self._mydict["address"])
def set_callback(self, func, *args) :
if func is None :
return self.isy.callback_del(self._mydict["address"])
else :
return self.isy.callback_set(self._mydict["address"], func, args)
callback = property(get_callback, set_callback)
def is_member(self, obj) :
if "members" in self._mydict :
if isinstance(obj, str) :
return obj in self._mydict["members"]
elif isinstance(obj, _IsyNodeBase) :
return obj._get_prop("address") in self._mydict["members"]
return False
def member_add(self, node, flag=0) :
r = self.isy.soapcomm("SetParent",
node=node._get_prop("address"), nodeType=node.nodeType(),
parent=self._mydict["address"], parentType=self.nodeType())
def _rename(self, cmd, newname) :
if self.debug & 0x01 :
print("rename : ", self.__class__.__name__, " : ", newname)
#if not isinstance(newname, str) or len(newname) == 0 :
# print "newname : ", newname
# raise IsyTypeError("rename : name value not str")
r = self.isy.soapcomm(cmd,
id=self._mydict["address"], name=newname )
return r
# check if scene _contains_ node
def __contains__(self, other):
return self.is_member(other)
# check if obj _contains_ attib
# def __contains__(self, other):
# if isinstance(other, str) :
# return other in self._getlist
# else :
# return False
# class MemberDicte(dict):
#
# def __getitem__(self, key):
# val = dict.__getitem__(self, key)
# print 'GET', key
# return val
#
# def __setitem__(self, key, val):
# print 'SET', key, val
# dict.__setitem__(self, key, val)
#
# def __delitem__(self, key):
# print 'DEL', key
# dict.__delitem__(self, key)
#
# def __repr__(self):
# dictrepr = dict.__repr__(self)
# return '%s(%s)' % (type(self).__name__, dictrepr)
#
# def get(self, key, default_val):
# print 'GET', key, default_val
# dict.get(self, key, default_val)
#
# def update(self, *args, **kwargs):
# print 'update', args, kwargs
# for k, v in dict(*args, **kwargs).iteritems():
# self[k] = v
#
# convers a node Id to a int
# eg: "9 4A 5F 2" => 00001001010010100101111100000010 => 155868930
#
def node_id_to_int(h) :
a = h.split(' ')
return ( int(a[0], 16) << 24 ) | ( int(a[1], 16) << 16 ) | \
( int(a[2], 16) << 8 ) | int(a[3], 16)
# def rate
# def onlevel
class IsyNode(_IsyNodeBase):
""" Node Class for ISY
Attributes :
status / ST
ramprate / RR
onlevel / OL
Readonly Attributes :
address
formatted
enabled
pnode
type
name
ELK_ID
flag
funtions:
get_rr:
set_rr:
Bugs: Results are undefined for Node class objects that
represent a deleteed node
"""
_getlist = ['address', 'enabled', 'formatted',
'ELK_ID',
'parent', 'parent-type',
'name', 'pnode', 'flag', 'wattage',
'isLoad', 'location', 'description', 'spoken',
'OL', 'RR', 'ST', 'type']
_setlist = ['RR', 'OL', 'status', 'ramprate', 'onlevel', 'enable']
_propalias = {'status': 'ST', 'value': 'ST', 'val': 'ST',
'id': 'address', 'addr': 'address',
'ramprate': 'RR', 'onlevel': 'OL',
"node-flag": "flag"}
#_boollist = [ "enabled" ]
def __init__(self, isy, ndict) :
# self._objtype = (1, "node")
self._objtype = "node"
        self._nodeprops = None
        self._nodenotes = None
super(self.__class__, self).__init__(isy, ndict)
# if not self.isy.eventupdates :
# #update only nodes
# if "node-flag" in self._mydict :
# self.update()
        # store an int so __hash__ (below) can return it directly
        self._hash = int(hashlib.sha256(
                self._mydict["address"].encode('utf-8')).hexdigest(), 16)
if self.debug & 0x01 :
print("Init Node : \"" + self._mydict["address"] + \
"\" : \"" + self._mydict["name"] + "\"")
# self.isy._printdict(self.__dict__)
# Special case from BaseClass due to ST/RR/OL props
def _get_prop(self, prop):
# print "IN get_prop ", prop
if prop == "formatted" :
prop = "ST"
value = "formatted"
else :
value = "value"
if prop in self._propalias :
prop = self._propalias[prop]
if not prop in self._getlist :
# if prop in ['parent', 'parent-type'] :
# return None
raise IsyPropertyError("no property Attribute {!s}".format(prop))
# check if we have a property
if prop in ['isLoad', 'location', 'description', 'spoken'] :
            if self._nodenotes is None :
self._nodenotes = self.isy.node_get_notes(self._mydict["address"])
if self._nodenotes is None :
return None
if prop in self._nodenotes :
return self._nodenotes[prop]
else :
# return None
return ""
if prop in ['ST', 'OL', 'RR'] :
            # Scenes do not have property values
if prop in self._mydict["property"] :
# print self._mydict["property"]
# print "prop value", prop, value
return self._mydict["property"][prop][value]
else :
return None
# if self._mydict["property"]["time"] == 0 :
# self.update()
# elif self.isy.cachetime :
# if time.gmtime() < (self.cachetime + self._mydict["property"]["time"]) :
# self.update()
else :
# if prop in self._mydict :
# if prop in self._boollist :
# return(val2bool(self._mydict[prop]))
# else :
# return self._mydict[prop]
# else :
# return None
return super(self.__class__, self)._get_prop(prop)
def _set_prop(self, prop, new_value):
""" generic property set """
# print "IN set_prop ", prop, new_value
if self.debug & 0x04 :
print("_set_prop ", prop, " : ", new_value)
if prop in self._propalias :
prop = self._propalias[prop]
if not prop in self._setlist :
if prop == "ST" :
self.on(new_value)
return
else :
raise IsyPropertyError("_set_prop : " \
"Invalid property Attribute " + prop)
if prop == 'enable' :
self._mydict[prop] = bool(new_value)
self.isy.node_enable(self._mydict["address"], bool(new_value))
elif prop in ['OL', 'RR'] :
            if not str(new_value).isdigit() :
                raise IsyTypeError("Set Property : Bad Value : node=%s prop=%s val=%s" %
                    (self._mydict["address"], prop, str(new_value)))
self.isy._node_send(self._mydict["address"], "set", prop, str(new_value))
# self._mydict["property"]["time"] = 0
if prop in self._mydict["property"] :
# if isinstance(new_value, (int, float)) : # already checked with isdigit
self._mydict["property"][prop]["value"] = new_value
# we need to tie this to some action
elif prop in self._mydict :
# self._mydict[prop] = new_value
pass
else :
#print "_set_prop AttributeError"
raise AttributeError("no Attribute " + prop)
def _gettype(self):
""" Type of Node (readonly) """
return "node"
# enable node
def get_enable(self):
""" get enable/disable status a node """
return self._get_prop("enable")
def set_enable(self, new_bool):
""" Set enable status a node
args:
enable bool
"""
return self._set_prop("enable", new_bool)
enable = property(get_enable, set_enable, None, "enable/disable a node")
def get_wattage(self):
""" get wattage """
return self._get_prop("wattage")
def set_wattage(self, watts):
""" set wattage property """
return self.isy.node_set_powerinfo(self._mydict["address"], wattage=watts)
wattage = property(get_wattage, set_wattage)
# ramprate property
    # obj method for getting/setting a Node's value;
# sets how fast a light fades on.
def get_rr(self):
""" Get/Set RampRate property of Node """
return self._get_prop("RR")
def set_rr(self, new_value):
""" Get/Set RampRate property of Node """
return self._set_prop("RR", new_value)
ramprate = property(get_rr, set_rr)
# On Level property
    # obj method for getting/setting a Node's value,
    # which in most cases is how bright the light is
    # when turned on
def get_ol(self):
""" Get/Set On Level property of Node """
return self._get_prop("OL")
def set_ol(self, new_value):
""" Get/Set On Level property of Node """
return self._set_prop("OL", new_value)
onlevel = property(get_ol, set_ol)
# def get_fm(self):
# """ property On Level Value of Node """
# return self._get_prop("formatted")
# formatted = property(get_fm)
# status property
    # obj method for getting/setting a Node's value,
    # which in most cases is how bright the light is
def get_status(self):
""" Get/Set Status property of Node """
return self._get_prop("ST")
def set_status(self, new_value):
""" Get/Set Status property of Node """
return self.on(new_value)
status = property(get_status, set_status)
def dim(self) :
"""
decrease brightness of a device by ~3%
"""
self.isy._node_send(self._mydict["address"], "cmd", "DIM")
def brighten(self) :
"""
increase brightness of a device by ~3%
"""
self.isy._node_send(self._mydict["address"], "cmd", "BRT")
#
# readonly to node attribute
#
def rename(self, newname) :
return self._rename("RenameNode", newname)
#
#
#
def update(self) :
""" force object to manualy update it's propertys """
xurl = "/rest/nodes/" + self._mydict["address"]
if self.debug & 0x01 :
print("_updatenode pre _getXML")
_nodestat = self.isy._getXMLetree(xurl)
# del self._mydict["property"]["ST"]
for prop in _nodestat.iter('property'):
tprop = dict()
for k, v in list(prop.items()) :
tprop[k] = v
if "id" in tprop :
self._mydict["property"][tprop["id"]] = tprop
# self._mydict["property"]["time"] = time.gmtime()
# experimental
def __bool__(self) :
#print "__nonzero__ call", self._mydict["property"]["ST"]["value"], \
# " :: ", int(self._mydict["property"]["ST"]["value"])
        return(int(self._mydict["property"]["ST"]["value"] or 0) > 0)
# use the node address as the hash value
def __hash__(self) :
return(self._hash)
# def __str__(self):
# print "__str__ call"
# return("my str : " + self._mydict["name"])
def __float__(self):
# print "__float__ call"
return float(int(self._mydict["property"]["ST"]["value"]) / float(255))
class IsyScene(_IsyNodeBase):
""" Node Group Class for ISY
writeonly attributes :
status
readonly attributes :
address
name
flag
deviceGroup
parent
parent-type
ELK_ID
"""
_getlist = ['address', 'name', "ELK_ID", "deviceGroup",
'flag', 'parent', 'parent-type']
_setlist = []
_propalias = {'id': 'address', 'addr': 'address',
"group-flag": "flag"}
def __init__(self, *args):
#self._objtype = (2, "scene")
self._objtype = "scene"
super(self.__class__, self).__init__(*args)
# status property
    # obj method for getting/setting a Scene's value,
    # which in most cases is how bright the light is
def set_status(self, new_value):
""" set status value of Scene """
return self._set_prop("ST", new_value)
status = property(None, set_status)
def _getmembers(self) :
""" List members of a scene or group """
if "members" in self._mydict :
return self._mydict["members"].keys()
else :
return None
members = property(_getmembers)
def member_list(self) :
return self._getmembers()
def is_member(self, obj) :
if "members" in self._mydict :
if isinstance(obj, str) :
return obj in self._mydict["members"]
elif isinstance(obj, _IsyNodeBase) :
return obj._get_prop("address") in self._mydict["members"]
return False
def rename(self, newname) :
""" rename node/scene/folder """
return self._rename("RenameGroup", newname)
def member_del(self, node) :
r = self.isy.scene_del_node(
self._mydict["address"],
node)
# r = self.isy.soapcomm("RemoveFromGroup",
# node=node._get_prop("address"),
# group=self._mydict["address"])
return r
    def member_add_controler(self, node, flag=16) :
        """ Add Node to scene/group as Controller """
        return self.member_add(node, flag)
    def member_add_responder(self, node, flag=32) :
        """ Add Node to scene/group as Responder """
        return self.member_add(node, flag)
def member_add(self, node, flag=16) :
""" Add Node to scene/group """
r = self.isy.scene_add_node(
self._mydict["address"],
node,
                flag=flag)
# r = self.isy.soapcomm("MoveNode",
# node=node._get_prop("address"),
# group=self._mydict["address"],
# flag=16)
return r
def member_iter(self, flag=0):
""" iter though members
Folders iter though their contents (nodes/scenes/folders)
Scene iter though their members (nodes)
Nodes iter though sub-nodes (nodes)
"""
if "members" in self._mydict :
for k in list(self._mydict["members"].keys()) :
if flag and not(flag & self._mydict["members"][k]) :
continue
else :
yield k
def __iter__(self):
return self.member_iter()
# check if scene _contains_ node
def __contains__(self, other):
return self.is_member(other)
class IsyNodeFolder(_IsyNodeBase):
""" Node Folder Class for ISY
readonly attributes :
address
name
flag
"""
_getlist = ['address', 'name', 'flag']
_setlist = []
_propalias = {'id': 'address', 'addr': 'address', "folder-flag": "flag"}
def __init__(self, *args):
#self._objtype = (3, "folder")
self._objtype = "folder"
super(self.__class__, self).__init__(*args)
def member_add(self, node, flag=0) :
""" add Node/Scene or Folder to Folder Obj
Args:
node = address, name or Node/Scene/Folder Obj
sets Parent for node/scene/folder to current Obj Folder
calls SOAP SetParent()
"""
r = self.isy.soapcomm("SetParent",
node=node._get_prop("address"), nodeType=node.nodeType(),
parent=self._mydict["address"], parentType=self.nodeType())
return r
def member_del(self, node) :
""" del Node/Scene or Folder to Folder Obj
Args:
node = address, name or Node/Scene/Folder Obj
del node/scene/folder to current Obj Folder
(and moves to base folder)
calls SOAP SetParent()
"""
r = self.isy.soapcomm("SetParent",
node=node._get_prop("address"), nodeType=node.nodeType())
return r
def rename(self, newname) :
""" renames current Obj Folder
args :
name = new folder name
calls SOAP RenameFolder()
"""
return self._rename("RenameFolder", newname)
def __iter__(self):
return self.member_iter()
def __contains__(self, other):
pass
#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
import __main__
print(__main__.__file__)
print("syntax ok")
exit(0)
| bsd-2-clause |
victoryckl/zxing-2.2 | cpp/scons/scons-local-2.0.0.final.0/SCons/Errors.py | 34 | 7440 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""SCons.Errors
This file contains the exception classes used to handle internal
and user errors in SCons.
"""
__revision__ = "src/engine/SCons/Errors.py 5023 2010/06/14 22:05:46 scons"
import SCons.Util
import exceptions
class BuildError(Exception):
""" Errors occuring while building.
BuildError have the following attributes:
Information about the cause of the build error:
-----------------------------------------------
errstr : a description of the error message
status : the return code of the action that caused the build
error. Must be set to a non-zero value even if the
build error is not due to an action returning a
non-zero returned code.
exitstatus : SCons exit status due to this build error.
Must be nonzero unless due to an explicit Exit()
call. Not always the same as status, since
actions return a status code that should be
respected, but SCons typically exits with 2
irrespective of the return value of the failed
action.
filename : The name of the file or directory that caused the
build error. Set to None if no files are associated with
this error. This might be different from the target
being built. For example, failure to create the
directory in which the target file will appear. It
can be None if the error is not due to a particular
filename.
exc_info : Info about exception that caused the build
error. Set to (None, None, None) if this build
error is not due to an exception.
    Information about the location of the error:
    --------------------------------------------
    node : the error occurred while building this target node(s)
    executor : the executor that caused the build to fail (might
               be None if the build failure is not due to the
               executor failing)
    action : the action that caused the build to fail (might be
             None if the build failure is not due to an
             action failure)
    command : the command line for the action that caused the
              build to fail (might be None if the build failure
              is not due to an action failure)
"""
def __init__(self,
node=None, errstr="Unknown error", status=2, exitstatus=2,
filename=None, executor=None, action=None, command=None,
exc_info=(None, None, None)):
self.errstr = errstr
self.status = status
self.exitstatus = exitstatus
self.filename = filename
self.exc_info = exc_info
self.node = node
self.executor = executor
self.action = action
self.command = command
Exception.__init__(self, node, errstr, status, exitstatus, filename,
executor, action, command, exc_info)
def __str__(self):
if self.filename:
return self.filename + ': ' + self.errstr
else:
return self.errstr
class InternalError(Exception):
pass
class UserError(Exception):
pass
class StopError(Exception):
pass
class EnvironmentError(Exception):
pass
class MSVCError(IOError):
pass
class ExplicitExit(Exception):
def __init__(self, node=None, status=None, *args):
self.node = node
self.status = status
self.exitstatus = status
Exception.__init__(self, *args)
def convert_to_BuildError(status, exc_info=None):
"""
    Convert any return code to a BuildError exception.
`status' can either be a return code or an Exception.
The buildError.status we set here will normally be
used as the exit status of the "scons" process.
"""
if not exc_info and isinstance(status, Exception):
exc_info = (status.__class__, status, None)
if isinstance(status, BuildError):
buildError = status
buildError.exitstatus = 2 # always exit with 2 on build errors
elif isinstance(status, ExplicitExit):
status = status.status
errstr = 'Explicit exit, status %s' % status
buildError = BuildError(
errstr=errstr,
status=status, # might be 0, OK here
exitstatus=status, # might be 0, OK here
exc_info=exc_info)
elif isinstance(status, (StopError, UserError)):
buildError = BuildError(
errstr=str(status),
status=2,
exitstatus=2,
exc_info=exc_info)
elif isinstance(status, exceptions.EnvironmentError):
# If an IOError/OSError happens, raise a BuildError.
# Report the name of the file or directory that caused the
# error, which might be different from the target being built
# (for example, failure to create the directory in which the
# target file will appear).
try: filename = status.filename
except AttributeError: filename = None
buildError = BuildError(
errstr=status.strerror,
status=status.errno,
exitstatus=2,
filename=filename,
exc_info=exc_info)
elif isinstance(status, Exception):
buildError = BuildError(
errstr='%s : %s' % (status.__class__.__name__, status),
status=2,
exitstatus=2,
exc_info=exc_info)
elif SCons.Util.is_String(status):
buildError = BuildError(
errstr=status,
status=2,
exitstatus=2)
else:
buildError = BuildError(
errstr="Error %s" % status,
status=status,
exitstatus=2)
#import sys
#sys.stderr.write("convert_to_BuildError: status %s => (errstr %s, status %s)"%(status,buildError.errstr, buildError.status))
return buildError
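# Illustrative usage (editor's sketch, not part of SCons proper):
#
#   try:
#       result = some_build_action()        # hypothetical failing action
#   except Exception, e:
#       raise convert_to_BuildError(e)      # normalize into a BuildError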
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
fin-ger/alternative-toolbar | alttoolbar_repeat.py | 1 | 16340 | # This is a part of the external Repeat One Song plugin for Rhythmbox
#
# Author: Eduardo Mucelli Rezende Oliveira
# E-mail: [email protected] or [email protected]
# Version: 0.4 (Unstable) for Rhythmbox 3.0.1 or later
#
#
# reworked for alternative-toolbar
# Author: fossfreedom
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import Gio
from alttoolbar_rb3compat import gtk_version
from alttoolbar_preferences import GSetting
from alttoolbar_preferences import CoverLocale
class Repeat(GObject.Object):
def __init__(self, shell, toggle_button):
"""
:param toggle_button: button that controls the repeat functions
:return:
"""
GObject.Object.__init__(self)
# use this to start the repeat-one-song capability (if True)
self.repeat_song = False
self.shell = shell
self.toggle_button = toggle_button
# self.one_song_stprint ("adjoining")
ate_normal, self.one_song_state_eos = range(2)
# self.one_song_state = self.one_song_state_normal
player = self.shell.props.shell_player
# Please refer to the comments above to understand why those
        # two callbacks are not being used currently, Rhythmbox 2.99.1
# player.connect('playing-song-changed', self.on_song_change)
# player.props.player.connect('eos', self.on_gst_player_eos)
player.connect('elapsed-changed', self.on_elapsed_change)
if gtk_version() >= 3.12:
popover = Gtk.Popover.new(toggle_button)
repeat = RepeatPopContainer(popover, toggle_button)
popover.add(repeat)
popover.set_modal(False)
else:
# use our custom Popover equivalent for Gtk+3.10 folks
popover = CustomPopover(toggle_button)
repeat = RepeatPopContainer(popover, toggle_button)
popover.add(repeat)
toggle_button.connect("toggled", self._on_toggle, popover, repeat)
repeat.connect('repeat-type-changed', self._on_repeat_type_changed)
self._on_repeat_type_changed(repeat, repeat.get_repeat_type())
def _on_toggle(self, toggle, popover, repeat):
if toggle.get_active():
popover.show_all()
self.repeat_song = \
repeat.get_repeat_type() == RepeatPopContainer.ONE_SONG
else:
popover.hide()
self.repeat_song = False
self._set_toggle_tooltip(repeat)
print("on toggle", self.repeat_song)
def _set_toggle_tooltip(self, repeat):
# locale stuff
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
if self.toggle_button.get_has_tooltip():
if repeat.get_repeat_type() == RepeatPopContainer.ALL_SONGS:
message = _("Repeat all tracks")
else:
message = _("Repeat the current track")
self.toggle_button.set_tooltip_text(message)
cl = CoverLocale()
cl.switch_locale(cl.Locale.RB)
def _on_repeat_type_changed(self, repeat, repeat_type):
if self.toggle_button.get_active():
if repeat_type == RepeatPopContainer.ONE_SONG:
self.repeat_song = True
else:
self.repeat_song = False
else:
self.repeat_song = False
self._set_toggle_tooltip(repeat)
print("repeat type changed", self.repeat_song)
# Looks like there is a bug on gstreamer player and a seg fault
# happens as soon as the 'eos' callback is called.
# https://bugs.launchpad.net/ubuntu/+source/rhythmbox/+bug/1239218
# As soon it gets fixed or a code-based workaround gets available,
# this method in conjunction with on_song_change will be used as
# the way to control the song repetition. Meanwhile, on_elapsed_change
# will be the chosen solution
def on_gst_player_eos(self, gst_player, stream_data, early=0):
# EOS signal means that the song changed because the song is over.
# ie. the user did not explicitly change the song.
# https://developer.gnome.org/rhythmbox/
# unstable/RBPlayer.html#RBPlayer-eos
if self.repeat_song:
self.one_song_state = self.one_song_state_eos
    # This is an old method to 'repeat' the current song as soon as it
    # reaches the last second. It will be used until the bug mentioned in
    # the comments above gets fixed.
def on_song_change(self, player, time):
if self.one_song_state == self.one_song_state_eos:
self.one_song_state = self.one_song_state_normal
player.do_previous()
    # This is an old method to 'repeat' the current song as soon as it
    # reaches the last second. It will be used until the bug mentioned in
    # the comments above gets fixed.
    # This might be improved by keeping an instance variable with the
    # duration and updating it in on_song_change; it would then not be
    # necessary to query the duration every time
def on_elapsed_change(self, player, time):
if self.repeat_song:
duration = player.get_playing_song_duration()
if duration > 0:
# Repeat on the last two seconds of the song. Previously the
# last second was used but RB now seems to use the last second
# to prepare things for the next song of the list
if time >= duration - 2:
player.set_playing_time(0)
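# Illustrative wiring (editor's sketch): a plugin creates the handler once
# the toolbar's toggle button exists, e.g.
#
#   repeat_toggle = Gtk.ToggleButton()
#   repeat_handler = Repeat(shell, repeat_toggle)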
class RepeatPopContainer(Gtk.ButtonBox):
__gsignals__ = {
"repeat-type-changed": (GObject.SIGNAL_RUN_LAST, None, (int,))
}
# repeat-type-changed is emitted with one of the following values
ONE_SONG = 1
ALL_SONGS = 2
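    # Illustrative connection (editor's sketch; the callback name is a
    # placeholder):
    #
    #   container.connect('repeat-type-changed',
    #                     lambda widget, repeat_type: handle(repeat_type))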
def __init__(self, parent_container, parent_button, *args, **kwargs):
super(RepeatPopContainer, self).__init__(*args, **kwargs)
self.set_orientation(Gtk.Orientation.HORIZONTAL)
self.set_layout(Gtk.ButtonBoxStyle.START)
self.props.margin = 5
context = self.get_style_context()
context.add_class('linked')
icon_size = 4
toggle1 = Gtk.RadioButton.new(None)
toggle1.set_mode(False)
fallback = 'media-playlist-repeat-symbolic'
icon = Gio.ThemedIcon.new_with_default_fallbacks(fallback)
image = Gtk.Image()
image.set_from_gicon(icon, icon_size)
image.props.margin = 5
toggle1.set_image(image)
toggle1.connect('leave-notify-event', self._on_popover_mouse_over)
toggle1.connect('enter-notify-event', self._on_popover_mouse_over)
toggle1.connect('toggled', self._on_popover_button_toggled)
# locale stuff
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
if parent_button.get_has_tooltip():
toggle1.set_tooltip_text(_("Repeat all tracks"))
self._repeat_button = toggle1
self.add(toggle1)
self.child_set_property(toggle1, "non-homogeneous", True)
toggle1.show_all()
self._repeat_image = Gtk.Image()
self._repeat_image.set_from_gicon(icon, icon_size)
self._repeat_image.props.margin = 5
toggle2 = Gtk.RadioButton.new_from_widget(toggle1)
toggle2.set_mode(False)
sym = 'media-playlist-repeat-song-symbolic'
icon2 = Gio.ThemedIcon.new_with_default_fallbacks(sym)
image2 = Gtk.Image()
image2.set_from_gicon(icon2, icon_size)
image2.props.margin = 5
toggle2.set_image(image2)
if parent_button.get_has_tooltip():
toggle2.set_tooltip_text(_("Repeat the current track"))
self._repeat_song_image = Gtk.Image()
self._repeat_song_image.set_from_gicon(icon2, icon_size)
self._repeat_song_image.props.margin = 5
toggle2.connect('leave-notify-event', self._on_popover_mouse_over)
toggle2.connect('enter-notify-event', self._on_popover_mouse_over)
toggle2.connect('toggled', self._on_popover_button_toggled)
toggle2.show_all()
self._repeat_song_button = toggle2
self.add(toggle2)
self.child_set_property(toggle2, "non-homogeneous", True)
self._popover_inprogress = 0
parent_container.connect('leave-notify-event',
self._on_popover_mouse_over)
parent_container.connect('enter-notify-event',
self._on_popover_mouse_over)
parent_button.connect('leave-notify-event',
self._on_popover_mouse_over)
parent_button.connect('enter-notify-event',
self._on_popover_mouse_over)
parent_button.set_image(self._repeat_image)
self._parent_container = parent_container
self._parent_button = parent_button
# now get the repeat-type saved in gsettings
# get values from gsettings
self.gs = GSetting()
self.plugin_settings = self.gs.get_setting(self.gs.Path.PLUGIN)
repeat_type = self.plugin_settings[self.gs.PluginKey.REPEAT_TYPE]
if repeat_type == RepeatPopContainer.ONE_SONG:
self._repeat_song_button.set_active(True)
def _on_popover_button_toggled(self, button, *args):
print("popover toggle")
if button.get_active():
if button == self._repeat_button:
self._parent_button.set_image(self._repeat_image)
self.emit('repeat-type-changed', RepeatPopContainer.ALL_SONGS)
self.plugin_settings[self.gs.PluginKey.REPEAT_TYPE] = \
RepeatPopContainer.ALL_SONGS
else:
self._parent_button.set_image(self._repeat_song_image)
self.emit('repeat-type-changed', RepeatPopContainer.ONE_SONG)
self.plugin_settings[self.gs.PluginKey.REPEAT_TYPE] = \
RepeatPopContainer.ONE_SONG
def get_repeat_type(self):
repeat_type = RepeatPopContainer.ALL_SONGS
if self._repeat_song_button.get_active():
repeat_type = RepeatPopContainer.ONE_SONG
return repeat_type
def _on_popover_mouse_over(self, widget, eventcrossing):
if eventcrossing.type == Gdk.EventType.ENTER_NOTIFY:
if self._popover_inprogress == 0:
self._popover_inprogress = 1
print("enter1")
else:
self._popover_inprogress = 2
print("enter2")
self._popover_inprogress_count = 0
if type(widget) is Gtk.ToggleButton:
print("here")
if widget.get_active():
print(self._parent_container)
self._parent_container.show_all()
else:
print("exit")
self._popover_inprogress = 3
def delayed(*args):
if self._popover_inprogress == 3:
self._popover_inprogress_count += 1
if self._popover_inprogress_count < 5:
return True
self._parent_container.hide()
self._popover_inprogress = 0
print("exit timeout")
return False
else:
return True
if self._popover_inprogress == 1:
print("adding timeout")
self._popover_inprogress = 2
GLib.timeout_add(100, delayed)
class CustomPopover(Gtk.Window):
def __init__(self, parent_button, *args, **kwargs):
super(CustomPopover, self).__init__(type=Gtk.WindowType.POPUP, *args,
**kwargs)
self.set_decorated(False)
self.set_resizable(False)
self.set_type_hint(Gdk.WindowTypeHint.DOCK)
self.stick()
self._parent_button = parent_button
self.connect_after('show', self._on_show)
# Track movements of the window to move calendar window as well
self.connect("configure-event", self.on_window_config)
def add(self, widget):
self._frame = Gtk.Frame()
self._frame.add(widget)
super(CustomPopover, self).add(self._frame)
self._frame.show_all()
# Popoverwindow co ordinates without off-screen correction:
# Window origin (x, y)
# |
# V
# ---------------------------------
# | Main Window |
# | |
# | |
# |Toggle button's (x, y) |
# |(relative to parent window) |
# | | |
# | V |
# | ......................... |
# Popover | | Toggle Button | |
# window's | | | |
# (x, y)---+> ......................... |
# |(window will be here) |
# | |
# | |
# ---------------------------------
# Popover Window's screen coordinates:
# x = Window's origin x + Toggle Button's relative x
# y = Window's origin y + Toggle Button's relative y + Toggle Button's
# height
def _on_show(self, widget):
rect = self._parent_button.get_allocation()
main_window = self._parent_button.get_toplevel()
[val, win_x, win_y] = main_window.get_window().get_origin()
cal_x = win_x + rect.x
cal_y = win_y + rect.y + rect.height
[x, y] = self.apply_screen_coord_correction(cal_x, cal_y)
self.move(x, y)
# This function "tries" to correct calendar window position so that it is
# not obscured when
# a portion of main window is off-screen.
# Known bug: If the main window is partially off-screen before Calendar
# window
# has been realized then get_allocation() will return rect of 1x1 in which
# case
# the calculations will fail & correction will not be applied
def apply_screen_coord_correction(self, x, y):
corrected_y = y
corrected_x = x
rect = self.get_allocation()
screen_w = Gdk.Screen.width()
screen_h = Gdk.Screen.height()
delta_x = screen_w - (x + rect.width)
delta_y = screen_h - (y + rect.height)
if delta_x < 0:
corrected_x += delta_x
print("at x")
if corrected_x < 0:
corrected_x = 0
button_rect = self._parent_button.get_allocation()
window_width, window_height = \
self._parent_button.get_toplevel().get_size()
# print (y, button_rect.y, button_rect.height, )
calc = (window_height - (button_rect.y + (button_rect.height * 2)))
if delta_y < 0 or (calc < 0):
btn_hgt = self._parent_button.get_allocation().height
corrected_y = y - rect.height - btn_hgt
print("at y")
if corrected_y < 0:
corrected_y = 0
return [corrected_x, corrected_y]
# "configure-event" callback of main window, try to move calendar window
# along with main window.
def on_window_config(self, widget, event):
        # Maybe a better way to find the visibility
if self.get_mapped():
rect = self._parent_button.get_allocation()
main_window = self._parent_button.get_toplevel()
[val, win_x, win_y] = main_window.get_window().get_origin()
cal_x = win_x + rect.x
cal_y = win_y + rect.y + rect.height
self.show_all()
[x, y] = self.apply_screen_coord_correction(cal_x, cal_y)
self.move(x, y)
| gpl-3.0 |
christer155/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/base64_codec.py | 88 | 2417 | """ Python 'base64_codec' Codec - base64 content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs, base64
### Codec APIs
def base64_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = base64.encodestring(input)
return (output, len(input))
def base64_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = base64.decodestring(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return base64_encode(input,errors)
def decode(self, input,errors='strict'):
return base64_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
assert self.errors == 'strict'
return base64.encodestring(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
assert self.errors == 'strict'
return base64.decodestring(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='base64',
encode=base64_encode,
decode=base64_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
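# Illustrative usage (editor's sketch; relies on this codec being
# registered under the name 'base64' in the encodings search path, as it
# is in the Python 2 stdlib):
#
#   "payload".encode("base64")         # -> 'cGF5bG9hZA==\n'
#   "cGF5bG9hZA==\n".decode("base64")  # -> 'payload'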
| apache-2.0 |
edisonlz/fruit | web_project/base/site-packages/androguard/core/binaries/idapipe.py | 7 | 6942 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE, STDOUT
import os, sys
import xmlrpclib
import cPickle
class _Method :
def __init__(self, proxy, name) :
self.proxy = proxy
self.name = name
def __call__(self, *args):
#print "CALL", self.name, args
z = getattr( self.proxy, self.name, None )
#print "SEND", repr(cPickle.dumps( args ) )
try :
if len(args) == 1 :
ret = z( cPickle.dumps( args[0] ) )
else :
ret = z( cPickle.dumps( args ) )
#print "RECEIVE", repr(ret)
return cPickle.loads( ret )
except xmlrpclib.ProtocolError :
return []
class MyXMLRPC :
def __init__(self, proxy) :
self.proxy = proxy
def __getattr__(self, name) :
return _Method(self.proxy, name)
class BasicBlock :
def __init__(self, ins) :
self.ins = ins
def show(self) :
for i in self.ins :
print i
class Function :
def __init__(self, name, start_ea, instructions, information) :
#print name, start_ea
self.name = name
self.start_ea = start_ea
self.information = information
self.basic_blocks = []
self.instructions = instructions
r = {}
idx = 0
for i in instructions :
r[ i[0] ] = idx
idx += 1
for i in information[0] :
try :
start = r[i[0]]
end = r[i[1]] + 1
self.basic_blocks.append( BasicBlock( instructions[start:end] ) )
except KeyError :
pass
def get_instructions(self) :
return [ i for i in self.instructions ]
def run_ida(idapath, wrapper_init_path, binpath) :
os.environ["TVHEADLESS"] = "1"
pid = os.fork()
if pid == 0:
wrapper_path = "-S" + wrapper_init_path
l = [ idapath, "-A", wrapper_path, binpath ]
print l
compile = Popen(l, stdout=open('/dev/null', 'w'), stderr=STDOUT)
stdout, stderr = compile.communicate()
# print stdout, stderr
sys.exit(0)
class IDAPipe :
def __init__(self, idapath, binpath, wrapper_init_path) :
self.idapath = idapath
self.binpath = binpath
self.proxy = None
        # argument order matches run_ida's (idapath, wrapper_init_path,
        # binpath) signature above
        run_ida(self.idapath, wrapper_init_path, self.binpath)
while 1 :
try :
self.proxy = xmlrpclib.ServerProxy("http://localhost:9000/")
self.proxy.is_connected()
break
except :
pass
#print self.proxy
self.proxy = MyXMLRPC( self.proxy )
def quit(self) :
try :
self.proxy.quit()
except :
pass
def _build_functions(self, functions) :
F = {}
for i in functions :
F[ i ] = Function( functions[i][0], i, functions[i][1:-1], functions[i][-1] )
return F
def get_quick_functions(self) :
functions = self.get_raw()
return self._build_functions( functions )
def get_raw(self) :
return self.proxy.get_raw()
def get_nb_functions(self) :
return len(self.proxy.Functions())
def get_functions(self) :
for function_ea in self.proxy.Functions() :
self.get_function_addr( function_ea )
def get_function_name(self, name) :
function_ea = self.proxy.get_function( name )
self.get_function_addr( function_ea )
def get_function_addr(self, function_ea) :
if function_ea == -1 :
return
f_start = function_ea
f_end = self.proxy.GetFunctionAttr(function_ea, 4) #FUNCATTR_END)
edges = set()
boundaries = set((f_start,))
for head in self.proxy.Heads(f_start, f_end) :
if self.proxy.isCode( self.proxy.GetFlags( head ) ) :
refs = self.proxy.CodeRefsFrom(head, 0)
refs = set(filter(lambda x: x>=f_start and x<=f_end, refs))
#print head, f_end, refs, self.proxy.GetMnem(head), self.proxy.GetOpnd(head, 0), self.proxy.GetOpnd(head, 1)
if refs :
next_head = self.proxy.NextHead(head, f_end)
if self.proxy.isFlow(self.proxy.GetFlags(next_head)):
refs.add(next_head)
# Update the boundaries found so far.
boundaries.update(refs)
# For each of the references found, and edge is
# created.
for r in refs:
# If the flow could also come from the address
# previous to the destination of the branching
# an edge is created.
if self.proxy.isFlow(self.proxy.GetFlags(r)):
edges.add((self.proxy.PrevHead(r, f_start), r))
edges.add((head, r))
#print edges, boundaries
# Let's build the list of (startEA, startEA) couples
# for each basic block
sorted_boundaries = sorted(boundaries, reverse = True)
end_addr = self.proxy.PrevHead(f_end, f_start)
bb_addr = []
for begin_addr in sorted_boundaries:
bb_addr.append((begin_addr, end_addr))
# search the next end_addr which could be
# farther than just the previous head
# if data are interlaced in the code
# WARNING: it assumes it won't epicly fail ;)
end_addr = self.proxy.PrevHead(begin_addr, f_start)
while not self.proxy.isCode(self.proxy.GetFlags(end_addr)):
end_addr = self.proxy.PrevHead(end_addr, f_start)
# And finally return the result
bb_addr.reverse()
#print bb_addr, sorted(edges)
def display_function(f) :
print f, f.name, f.information
for i in f.basic_blocks :
print i
i.show()
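# Illustrative usage (editor's sketch; all paths are placeholders):
#
#   pipe = IDAPipe("/opt/ida/idal", "/tmp/target.so", "./wrapper_init.py")
#   functions = pipe.get_quick_functions()
#   for addr in functions:
#       display_function(functions[addr])
#   pipe.quit()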
| apache-2.0 |
nlloyd/SubliminalCollaborator | libs/twisted/test/test_epoll.py | 10 | 4326 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for epoll wrapper.
"""
import socket, errno, time
from twisted.trial import unittest
from twisted.python.util import untilConcludes
try:
from twisted.python import _epoll
except ImportError:
_epoll = None
class EPoll(unittest.TestCase):
"""
Tests for the low-level epoll bindings.
"""
def setUp(self):
"""
Create a listening server port and a list with which to keep track
of created sockets.
"""
self.serverSocket = socket.socket()
self.serverSocket.bind(('127.0.0.1', 0))
self.serverSocket.listen(1)
self.connections = [self.serverSocket]
def tearDown(self):
"""
Close any sockets which were opened by the test.
"""
for skt in self.connections:
skt.close()
def _connectedPair(self):
"""
Return the two sockets which make up a new TCP connection.
"""
client = socket.socket()
client.setblocking(False)
try:
client.connect(('127.0.0.1', self.serverSocket.getsockname()[1]))
except socket.error, e:
self.assertEqual(e.args[0], errno.EINPROGRESS)
else:
raise unittest.FailTest("Connect should have raised EINPROGRESS")
server, addr = self.serverSocket.accept()
self.connections.extend((client, server))
return client, server
def test_create(self):
"""
Test the creation of an epoll object.
"""
try:
p = _epoll.epoll(16)
except OSError, e:
raise unittest.FailTest(str(e))
else:
p.close()
def test_badCreate(self):
"""
Test that attempting to create an epoll object with some random
objects raises a TypeError.
"""
self.assertRaises(TypeError, _epoll.epoll, 1, 2, 3)
self.assertRaises(TypeError, _epoll.epoll, 'foo')
self.assertRaises(TypeError, _epoll.epoll, None)
self.assertRaises(TypeError, _epoll.epoll, ())
self.assertRaises(TypeError, _epoll.epoll, ['foo'])
self.assertRaises(TypeError, _epoll.epoll, {})
def test_add(self):
"""
Test adding a socket to an epoll object.
"""
server, client = self._connectedPair()
p = _epoll.epoll(2)
try:
p._control(_epoll.CTL_ADD, server.fileno(), _epoll.IN | _epoll.OUT)
p._control(_epoll.CTL_ADD, client.fileno(), _epoll.IN | _epoll.OUT)
finally:
p.close()
def test_controlAndWait(self):
"""
Test waiting on an epoll object which has had some sockets added to
it.
"""
client, server = self._connectedPair()
p = _epoll.epoll(16)
p._control(_epoll.CTL_ADD, client.fileno(), _epoll.IN | _epoll.OUT |
_epoll.ET)
p._control(_epoll.CTL_ADD, server.fileno(), _epoll.IN | _epoll.OUT |
_epoll.ET)
now = time.time()
events = untilConcludes(p.wait, 4, 1000)
then = time.time()
self.failIf(then - now > 0.01)
events.sort()
expected = [(client.fileno(), _epoll.OUT),
(server.fileno(), _epoll.OUT)]
expected.sort()
self.assertEqual(events, expected)
now = time.time()
events = untilConcludes(p.wait, 4, 200)
then = time.time()
self.failUnless(then - now > 0.1)
self.failIf(events)
client.send("Hello!")
server.send("world!!!")
now = time.time()
events = untilConcludes(p.wait, 4, 1000)
then = time.time()
self.failIf(then - now > 0.01)
events.sort()
expected = [(client.fileno(), _epoll.IN | _epoll.OUT),
(server.fileno(), _epoll.IN | _epoll.OUT)]
expected.sort()
self.assertEqual(events, expected)
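    # Lifecycle exercised by the tests above, in miniature (editor's
    # summary sketch):
    #
    #   p = _epoll.epoll(16)                                   # create
    #   p._control(_epoll.CTL_ADD, fd, _epoll.IN | _epoll.ET)  # register fd
    #   events = p.wait(4, 1000)                               # poll (ms)
    #   p.close()                                              # teardown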
if _epoll is None:
EPoll.skip = "_epoll module unavailable"
else:
try:
e = _epoll.epoll(16)
except IOError, exc:
if exc.errno == errno.ENOSYS:
del exc
EPoll.skip = "epoll support missing from platform"
else:
raise
else:
e.close()
del e
| apache-2.0 |
tj93/pymtl | accel/strsearch/StrSearchFunc_test.py | 7 | 1324 | #=========================================================================
# StrSearchFunc_test.py
#=========================================================================
#
# PyMTL Functional Model of strsearch.
from pymtl import *
from StrSearchOO_test import strings, docs, reference
#-------------------------------------------------------------------------
# run_test
#-------------------------------------------------------------------------
def run_test( SearchModel ):
i = 0
for string in strings:
# Instantiate the model, elaborate it, and create a simulator
model = SearchModel( string )
model.elaborate()
sim = SimulationTool( model )
sim.reset()
for doc in docs:
model.in_.v = doc
sim.cycle()
assert model.out == reference[ i ]
i += 1
from StrSearchFunc import StrSearchMath, StrSearchAlg
#-------------------------------------------------------------------------
# test_strsearch_math
#-------------------------------------------------------------------------
def test_strsearch_math():
run_test( StrSearchMath )
#-------------------------------------------------------------------------
# test_strsearch_alg
#-------------------------------------------------------------------------
def test_strsearch_alg():
run_test( StrSearchAlg )
| bsd-3-clause |
shoyer/numpy | numpy/distutils/line_endings.py | 6 | 2085 | """ Functions for converting from DOS to UNIX line endings
"""
from __future__ import division, absolute_import, print_function
import sys, re, os
def dos2unix(file):
"Replace CRLF with LF in argument files. Print names of changed files."
if os.path.isdir(file):
print(file, "Directory!")
return
with open(file, "rb") as fp:
data = fp.read()
if '\0' in data:
print(file, "Binary!")
return
newdata = re.sub("\r\n", "\n", data)
if newdata != data:
print('dos2unix:', file)
with open(file, "wb") as f:
f.write(newdata)
return file
else:
print(file, 'ok')
def dos2unix_one_dir(modified_files, dir_name, file_names):
for file in file_names:
full_path = os.path.join(dir_name, file)
file = dos2unix(full_path)
if file is not None:
modified_files.append(file)
def dos2unix_dir(dir_name):
modified_files = []
os.path.walk(dir_name, dos2unix_one_dir, modified_files)
return modified_files
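# Illustrative usage (editor's sketch; os.path.walk is the Python 2 API):
#
#   changed = dos2unix_dir('/path/to/tree')   # returns the rewritten files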
#----------------------------------
def unix2dos(file):
"Replace LF with CRLF in argument files. Print names of changed files."
if os.path.isdir(file):
print(file, "Directory!")
return
with open(file, "rb") as fp:
data = fp.read()
if '\0' in data:
print(file, "Binary!")
return
newdata = re.sub("\r\n", "\n", data)
newdata = re.sub("\n", "\r\n", newdata)
if newdata != data:
print('unix2dos:', file)
with open(file, "wb") as f:
f.write(newdata)
return file
else:
print(file, 'ok')
def unix2dos_one_dir(modified_files, dir_name, file_names):
for file in file_names:
full_path = os.path.join(dir_name, file)
        file = unix2dos(full_path)
        if file is not None:
            modified_files.append(file)
def unix2dos_dir(dir_name):
modified_files = []
os.path.walk(dir_name, unix2dos_one_dir, modified_files)
return modified_files
if __name__ == "__main__":
dos2unix_dir(sys.argv[1])
| bsd-3-clause |
ilendl2/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/products/migrations/0008_auto_20180607_1804.py | 2 | 1109 | # Generated by Django 2.0 on 2018-06-07 18:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products', '0007_productindexpage_feed_image'),
]
operations = [
migrations.AlterField(
model_name='productindexpage',
name='page_ptr',
field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
),
migrations.AlterField(
model_name='productpage',
name='page_ptr',
field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
),
migrations.AlterField(
model_name='productpagetag',
name='tag',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products_productpagetag_items', to='taggit.Tag'),
),
]
| mit |
claws/txBOM | setup.py | 1 | 1666 | #!/usr/bin/env python
"""
A distutils installation script for txBOM.
"""
from distutils.core import setup
import txbom
long_description = """txBOM is a Python Twisted package that lets you retrieve forecasts
and observations from the Australian Bureau of Meteorology (BOM).
Use it to integrate non blocking retrieval of Australian Bureau of
Meteorology forecasts and observations into your Python Twisted
application.
"""
setup(name='txbom',
version='.'.join([str(x) for x in txbom.version]),
description='txbom is a Python Twisted package that lets you retrieve forecasts and observations from the Australian Bureau of Meteorology (BOM).',
long_description=long_description,
author='Chris Laws',
author_email='[email protected]',
license='http://www.opensource.org/licenses/mit-license.php',
url='https://github.com/claws/txBOM',
download_url='https://github.com/claws/txBOM/tarball/master',
packages=['txbom'],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Twisted',
'Topic :: Communications',
'Topic :: Home Automation',
'Topic :: System :: Monitoring',
'Topic :: Software Development :: Libraries :: Python Modules'],
requires=['Twisted']
)
| mit |
Threak/easyrel | getfav.py | 1 | 3507 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os
import os.path
import oauth2 as oauth
import json
from font_colors import font_colors
import getrel
import setfav
import time
from torrentApi import torrent_api as api
#name of config file where all keys get stored
config = '~/.config/getrel/getrel.json'
nzb_path = '~/.get_fav/nzbs'
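# Expected config layout (editor's sketch, inferred from the keys read
# below):
#
#   {
#     "xrel": {"consumer_key": "...", "consumer_secret": "...",
#              "oauth_token": "...", "oauth_token_secret": "...",
#              "username": "...", "password": "..."},
#     "skip": ["watchlist names to ignore"],
#     "torrent": {"prefer": false, "dir": "~/Downloads"}
#   }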
def decode_json(resp):
fav_dict = json.loads(resp)
fav_list = []
for fav in fav_dict['payload']:
#print fav
#if there are no releases in any list, the key 'releases' does not exist
if ('releases' not in fav):
continue
if (fav['releases']):
#print fav['releases']
for dirname in fav['releases']:
fav_list.append(dirname['dirname'])
return fav_list
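# Illustrative payload shape decode_json() expects (field names taken from
# the parsing above; the release name itself is made up):
#   {"payload": [{"releases": [{"dirname": "Some.Release.720p-GRP"}]}]}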
config = os.path.expanduser(config)
try:
with open(config, 'r') as f:
config_dict = json.loads(f.read())
except IOError:
print 'please run auth_xrel first'
exit(-42)
config_args = getrel.init_argparse(config)
parsed_config = getrel.init_configparser(config)
config_xrel = config_dict['xrel']
consumer_key = config_xrel['consumer_key']
consumer_secret = config_xrel['consumer_secret']
oauth_token = config_xrel['oauth_token']
oauth_token_secret = config_xrel['oauth_token_secret']
url = 'http://api.xrel.to/api/favs/lists.json'
consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
token = oauth.Token(key=oauth_token, secret=oauth_token_secret)
client = oauth.Client(consumer, token)
resp, content = client.request(url)
favdict = {}
favlists = json.loads(content[11:-3])['payload']
nzb_path = os.path.expanduser(nzb_path)
for favlist in favlists:
listname = favlist['name']
if listname in config_dict['skip']:
continue
listid = favlist['id']
new_dir = os.path.join(nzb_path, listname)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
url = 'http://api.xrel.to/api/favs/list_entries.json?id=%d&get_releases=true' % listid
resp, content = client.request(url)
favdict[listid] = {'name': listname, 'rels': []}
for fav in json.loads(content[11:-3])['payload']:
if ('releases' not in fav):
continue
if (fav['releases']):
for dirname in fav['releases']:
relid = int(dirname['link_href'].split('/')[4])
favdict[listid]['rels'].append({'name': dirname['dirname'], 'id': relid})
try:
xrel_session = setfav.login({'username': config_xrel['username'], 'password': config_xrel['password']})
except:
pass
prefer_torrent = config_dict['torrent']['prefer']
torrent_download_path = config_dict['torrent']['dir']
if prefer_torrent:
torrentApi = api.TorrentApi(base_path=torrent_download_path)
for favlist in favdict:
listname = favdict[favlist]['name']
print '%s%s%s:' % (font_colors.f_magenta, listname, font_colors.f_reset)
new_dir = os.path.join(nzb_path, listname)
config_args['category'] = listname.lower()
for reldict in favdict[favlist]['rels']:
rel = reldict['name']
print '%s%s%s searching...' % (font_colors.f_yellow, rel, font_colors.f_reset)
config_args['query'] = rel
checked_args = getrel.check_args(config_args.copy(), parsed_config)
set_fav_data = {
'anticache': long(time.time()), # unix time stamp (long)
'isnew': 0, # mark as new, otherwise mark as read (boolean)
'wid': favlist, # watchlist id (int)
'rid': reldict['id'] # release id (int)
}
found_release = False
if prefer_torrent:
found_release = torrentApi.search(rel)
if not found_release:
found_release = getrel.main(checked_args)
if found_release:
if xrel_session:
setfav.set_fav_state(xrel_session, set_fav_data)
| mit |
Forage/Gramps | gramps/gen/plug/docgen/graphdoc.py | 1 | 41769 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from __future__ import unicode_literals
import os
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import BytesIO
import tempfile
from subprocess import Popen, PIPE
import sys
#-------------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------------
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
from ...utils.file import search_for
from . import BaseDoc
from ..menu import NumberOption, TextOption, EnumeratedListOption, \
BooleanOption
from ...constfunc import win
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".graphdoc")
#-------------------------------------------------------------------------------
#
# Private Constants
#
#-------------------------------------------------------------------------------
_FONTS = [ { 'name' : _("Default"), 'value' : "" },
{ 'name' : _("PostScript / Helvetica"), 'value' : "Helvetica" },
{ 'name' : _("TrueType / FreeSans"), 'value' : "FreeSans" } ]
_RANKDIR = [ { 'name' : _("Vertical (↓)"), 'value' : "TB" },
{ 'name' : _("Vertical (↑)"), 'value' : "BT" },
{ 'name' : _("Horizontal (→)"), 'value' : "LR" },
{ 'name' : _("Horizontal (←)"), 'value' : "RL" } ]
_PAGEDIR = [ { 'name' : _("Bottom, left"), 'value' :"BL" },
{ 'name' : _("Bottom, right"), 'value' :"BR" },
{ 'name' : _("Top, left"), 'value' :"TL" },
{ 'name' : _("Top, Right"), 'value' :"TR" },
{ 'name' : _("Right, bottom"), 'value' :"RB" },
{ 'name' : _("Right, top"), 'value' :"RT" },
{ 'name' : _("Left, bottom"), 'value' :"LB" },
{ 'name' : _("Left, top"), 'value' :"LT" } ]
_RATIO = [ { 'name' : _("Compress to minimal size"), 'value': "compress" },
{ 'name' : _("Fill the given area"), 'value': "fill" },
{ 'name' : _("Expand uniformly"), 'value': "expand" } ]
_NOTELOC = [ { 'name' : _("Top"), 'value' : "t" },
{ 'name' : _("Bottom"), 'value' : "b" }]
if win():
_DOT_FOUND = search_for("dot.exe")
if search_for("gswin32c.exe") == 1:
_GS_CMD = "gswin32c.exe"
elif search_for("gswin32.exe") == 1:
_GS_CMD = "gswin32.exe"
else:
_GS_CMD = ""
else:
_DOT_FOUND = search_for("dot")
if search_for("gs") == 1:
_GS_CMD = "gs"
else:
_GS_CMD = ""
#-------------------------------------------------------------------------------
#
# GVOptions
#
#-------------------------------------------------------------------------------
class GVOptions():
"""
Defines all of the controls necessary
to configure the graph reports.
"""
def __init__(self):
self.h_pages = None
self.v_pages = None
self.page_dir = None
self.dpi = None
def add_menu_options(self, menu):
"""
Add all graph related options to the menu.
@param menu: The menu the options should be added to.
@type menu: gen.plug.menu.Menu()
@return: nothing
"""
################################
category = _("GraphViz Layout")
################################
font_family = EnumeratedListOption(_("Font family"), "")
for item in _FONTS:
font_family.add_item(item["value"], item["name"])
font_family.set_help(_("Choose the font family. If international "
"characters don't show, use FreeSans font. "
"FreeSans is available from: "
"http://www.nongnu.org/freefont/"))
menu.add_option(category, "font_family", font_family)
font_size = NumberOption(_("Font size"), 14, 8, 128)
font_size.set_help(_("The font size, in points."))
menu.add_option(category, "font_size", font_size)
rank_dir = EnumeratedListOption(_("Graph Direction"), "TB")
for item in _RANKDIR:
rank_dir.add_item(item["value"], item["name"])
rank_dir.set_help(_("Whether graph goes from top to bottom "
"or left to right."))
menu.add_option(category, "rank_dir", rank_dir)
h_pages = NumberOption(_("Number of Horizontal Pages"), 1, 1, 25)
h_pages.set_help(_("GraphViz can create very large graphs by "
"spreading the graph across a rectangular "
"array of pages. This controls the number "
"pages in the array horizontally. "
"Only valid for dot and pdf via Ghostscript."))
menu.add_option(category, "h_pages", h_pages)
v_pages = NumberOption(_("Number of Vertical Pages"), 1, 1, 25)
v_pages.set_help(_("GraphViz can create very large graphs by "
"spreading the graph across a rectangular "
"array of pages. This controls the number "
"pages in the array vertically. "
"Only valid for dot and pdf via Ghostscript."))
menu.add_option(category, "v_pages", v_pages)
page_dir = EnumeratedListOption(_("Paging Direction"), "BL")
for item in _PAGEDIR:
page_dir.add_item(item["value"], item["name"])
page_dir.set_help(_("The order in which the graph pages are output. "
"This option only applies if the horizontal pages "
"or vertical pages are greater than 1."))
menu.add_option(category, "page_dir", page_dir)
# the page direction option only makes sense when the
# number of horizontal and/or vertical pages is > 1,
# so we need to remember these 3 controls for later
self.h_pages = h_pages
self.v_pages = v_pages
self.page_dir = page_dir
        # update the availability of the page direction control whenever
        # either page count changes
self.h_pages.connect('value-changed', self.pages_changed)
self.v_pages.connect('value-changed', self.pages_changed)
################################
category = _("GraphViz Options")
################################
aspect_ratio = EnumeratedListOption(_("Aspect ratio"), "fill")
for item in _RATIO:
aspect_ratio.add_item(item["value"], item["name"])
help_text = _('Affects node spacing and scaling of the graph.\n'
'If the graph is smaller than the print area:\n'
' Compress will not change the node spacing. \n'
' Fill will increase the node spacing to fit the print area in '
'both width and height.\n'
' Expand will increase the node spacing uniformly to preserve '
'the aspect ratio.\n'
'If the graph is larger than the print area:\n'
' Compress will shrink the graph to achieve tight packing at the '
'expense of symmetry.\n'
' Fill will shrink the graph to fit the print area after first '
'increasing the node spacing.\n'
' Expand will shrink the graph uniformly to fit the print area.')
aspect_ratio.set_help(help_text)
menu.add_option(category, "ratio", aspect_ratio)
dpi = NumberOption(_("DPI"), 75, 20, 1200)
dpi.set_help(_( "Dots per inch. When creating images such as "
".gif or .png files for the web, try numbers "
"such as 100 or 300 DPI. PostScript and PDF files "
"always use 72 DPI."))
menu.add_option(category, "dpi", dpi)
self.dpi = dpi
nodesep = NumberOption(_("Node spacing"), 0.20, 0.01, 5.00, 0.01)
nodesep.set_help(_( "The minimum amount of free space, in inches, "
"between individual nodes. For vertical graphs, "
"this corresponds to spacing between columns. "
"For horizontal graphs, this corresponds to "
"spacing between rows."))
menu.add_option(category, "nodesep", nodesep)
ranksep = NumberOption(_("Rank spacing"), 0.20, 0.01, 5.00, 0.01)
ranksep.set_help(_( "The minimum amount of free space, in inches, "
"between ranks. For vertical graphs, this "
"corresponds to spacing between rows. For "
"horizontal graphs, this corresponds to spacing "
"between columns."))
menu.add_option(category, "ranksep", ranksep)
use_subgraphs = BooleanOption(_('Use subgraphs'), True)
use_subgraphs.set_help(_("Subgraphs can help GraphViz position "
"spouses together, but with non-trivial "
"graphs will result in longer lines and "
"larger graphs."))
menu.add_option(category, "usesubgraphs", use_subgraphs)
################################
category = _("Note")
################################
note = TextOption(_("Note to add to the graph"),
[""] )
note.set_help(_("This text will be added to the graph."))
menu.add_option(category, "note", note)
noteloc = EnumeratedListOption(_("Note location"), 't')
        for item in _NOTELOC:
            noteloc.add_item(item["value"], item["name"])
noteloc.set_help(_("Whether note will appear on top "
"or bottom of the page."))
menu.add_option(category, "noteloc", noteloc)
notesize = NumberOption(_("Note size"), 32, 8, 128)
notesize.set_help(_("The size of note text, in points."))
menu.add_option(category, "notesize", notesize)
def pages_changed(self):
"""
This method gets called every time the v_pages or h_pages
options are changed; when both vertical and horizontal
pages are set to "1", then the page_dir control needs to
be unavailable
"""
if self.v_pages.get_value() > 1 or \
self.h_pages.get_value() > 1:
self.page_dir.set_available(True)
else:
self.page_dir.set_available(False)
#-------------------------------------------------------------------------------
#
# GVDoc
#
#-------------------------------------------------------------------------------
class GVDoc(object):
"""
Abstract Interface for Graphviz document generators. Output formats
for Graphviz reports must implement this interface to be used by the
report system.
"""
def add_node(self, node_id, label, shape="", color="",
style="", fillcolor="", url="", htmloutput=False):
"""
Add a node to this graph. Nodes can be different shapes like boxes and
circles.
@param node_id: A unique identification value for this node.
Example: "p55"
@type node_id: string
@param label: The text to be displayed in the node.
Example: "John Smith"
@type label: string
@param shape: The shape for the node.
Examples: "box", "ellipse", "circle"
@type shape: string
@param color: The color of the node line.
Examples: "blue", "lightyellow"
@type color: string
@param style: The style of the node.
@type style: string
@param fillcolor: The fill color for the node.
Examples: "blue", "lightyellow"
@type fillcolor: string
@param url: A URL for the node.
@type url: string
@param htmloutput: Whether the label contains HTML.
@type htmloutput: boolean
@return: nothing
"""
raise NotImplementedError
def add_link(self, id1, id2, style="", head="", tail="", comment=""):
"""
Add a link between two nodes.
@param id1: The unique identifier of the starting node.
Example: "p55"
@type id1: string
@param id2: The unique identifier of the ending node.
Example: "p55"
@type id2: string
@param comment: A text string displayed at the end of the link line.
Example: "person C is the son of person A and person B"
@type comment: string
@return: nothing
"""
raise NotImplementedError
def add_comment(self, comment):
"""
Add a comment to the source file.
@param comment: A text string to add as a comment.
Example: "Next comes the individuals."
@type comment: string
@return: nothing
"""
raise NotImplementedError
def start_subgraph(self, graph_id):
"""
Start a subgraph in this graph.
        @param graph_id: The unique identifier of the subgraph.
            Example: "p55"
        @type graph_id: string
@return: nothing
"""
raise NotImplementedError
def end_subgraph(self):
"""
End a subgraph that was previously started in this graph.
@return: nothing
"""
raise NotImplementedError
#-------------------------------------------------------------------------------
#
# GVDocBase
#
#-------------------------------------------------------------------------------
class GVDocBase(BaseDoc, GVDoc):
"""
Base document generator for all Graphviz document generators. Classes that
inherit from this class will only need to implement the close function.
The close function will generate the actual file of the appropriate type.
"""
def __init__(self, options, paper_style):
BaseDoc.__init__(self, None, paper_style)
self._filename = None
if sys.version_info[0] < 3:
self._dot = StringIO()
else:
self._dot = BytesIO()
self._paper = paper_style
get_option_by_name = options.menu.get_option_by_name
get_value = lambda name: get_option_by_name(name).get_value()
self.dpi = get_value('dpi')
self.fontfamily = get_value('font_family')
self.fontsize = get_value('font_size')
self.hpages = get_value('h_pages')
self.nodesep = get_value('nodesep')
self.noteloc = get_value('noteloc')
self.notesize = get_value('notesize')
self.note = get_value('note')
self.pagedir = get_value('page_dir')
self.rankdir = get_value('rank_dir')
self.ranksep = get_value('ranksep')
self.ratio = get_value('ratio')
self.vpages = get_value('v_pages')
self.usesubgraphs = get_value('usesubgraphs')
paper_size = paper_style.get_size()
# Subtract 0.01" from the drawing area to make some room between
# this area and the margin in order to compensate for different
# rounding errors internally in dot
sizew = ( paper_size.get_width() -
self._paper.get_left_margin() -
self._paper.get_right_margin() ) / 2.54 - 0.01
sizeh = ( paper_size.get_height() -
self._paper.get_top_margin() -
self._paper.get_bottom_margin() ) / 2.54 - 0.01
pheight = paper_size.get_height_inches()
pwidth = paper_size.get_width_inches()
xmargin = self._paper.get_left_margin() / 2.54
ymargin = self._paper.get_top_margin() / 2.54
sizew *= self.hpages
sizeh *= self.vpages
self.write(
'digraph GRAMPS_graph\n'
'{\n'
' bgcolor=white;\n'
' center="true"; \n'
' charset="utf8";\n'
' concentrate="false";\n' +
' dpi="%d";\n' % self.dpi +
' graph [fontsize=%d];\n' % self.fontsize +
' margin="%3.2f,%3.2f"; \n' % (xmargin, ymargin) +
' mclimit="99";\n' +
' nodesep="%.2f";\n' % self.nodesep +
' outputorder="edgesfirst";\n' +
('#' if self.hpages == self.vpages == 1 else '') +
# comment out "page=" if the graph is on 1 page (bug #2121)
' page="%3.2f,%3.2f";\n' % (pwidth, pheight) +
' pagedir="%s";\n' % self.pagedir +
' rankdir="%s";\n' % self.rankdir +
' ranksep="%.2f";\n' % self.ranksep +
' ratio="%s";\n' % self.ratio +
' searchsize="100";\n' +
' size="%3.2f,%3.2f"; \n' % (sizew, sizeh) +
' splines="true";\n' +
'\n' +
' edge [len=0.5 style=solid fontsize=%d];\n' % self.fontsize
)
if self.fontfamily:
self.write( ' node [style=filled fontname="%s" fontsize=%d];\n'
% ( self.fontfamily, self.fontsize ) )
else:
self.write( ' node [style=filled fontsize=%d];\n'
% self.fontsize )
self.write( '\n' )
def write(self, text):
""" Write text to the dot file """
self._dot.write(text.encode('utf8', 'xmlcharrefreplace'))
def open(self, filename):
""" Implement GVDocBase.open() """
self._filename = os.path.normpath(os.path.abspath(filename))
def close(self):
"""
This isn't useful by itself. Other classes need to override this and
actually generate a file.
"""
if self.note:
# build up the label
label = ''
for line in self.note: # for every line in the note...
line = line.strip() # ...strip whitespace from this line...
if line != '': # ...and if we still have a line...
if label != '': # ...see if we need to insert a newline...
label += '\\n'
label += line.replace('"', '\\\"')
# after all that, see if we have a label to display
if label != '':
self.write(
'\n' +
' label="%s";\n' % label +
' labelloc="%s";\n' % self.noteloc +
' fontsize="%d";\n' % self.notesize
)
self.write( '}\n\n' )
def add_node(self, node_id, label, shape="", color="",
style="", fillcolor="", url="", htmloutput=False):
"""
Add a node to this graph. Nodes can be different shapes like boxes and
circles.
Implements GVDocBase.add_node().
"""
text = '['
if shape:
text += ' shape="%s"' % shape
if color:
text += ' color="%s"' % color
if fillcolor:
text += ' fillcolor="%s"' % fillcolor
if style:
text += ' style="%s"' % style
# note that we always output a label -- even if an empty string --
# otherwise GraphViz uses the node ID as the label which is unlikely
# to be what the user wants to see in the graph
if label.startswith("<") or htmloutput:
text += ' label=<%s>' % label
else:
text += ' label="%s"' % label
if url:
text += ' URL="%s"' % url
text += " ]"
self.write(' %s %s;\n' % (node_id, text))
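        # For example (hypothetical values), add_node('p55', 'John Smith',
        # shape='box', fillcolor='lightyellow') writes the line:
        #   p55 [ shape="box" fillcolor="lightyellow" label="John Smith" ];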
def add_link(self, id1, id2, style="", head="", tail="", comment=""):
"""
Add a link between two nodes.
Implements GVDocBase.add_link().
"""
self.write(' %s -> %s' % (id1, id2))
if style or head or tail:
self.write(' [')
if style:
self.write(' style=%s' % style)
if head:
self.write(' arrowhead=%s' % head)
if tail:
self.write(' arrowtail=%s' % tail)
if head:
if tail:
self.write(' dir=both')
else:
self.write(' dir=forward')
else:
if tail:
self.write(' dir=back')
else:
self.write(' dir=none')
self.write(' ]')
self.write(';')
if comment:
self.write(' // %s' % comment)
self.write('\n')
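        # For example (hypothetical ids), add_link('p55', 'f10', style='solid',
        # head='none', tail='normal') writes the line:
        #   p55 -> f10 [ style=solid arrowhead=none arrowtail=normal dir=both ];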
def add_comment(self, comment):
"""
Add a comment.
Implements GVDocBase.add_comment().
"""
tmp = comment.split('\n')
for line in tmp:
text = line.strip()
if text == "":
self.write('\n')
elif text.startswith('#'):
self.write('%s\n' % text)
else:
self.write('# %s\n' % text)
def start_subgraph(self, graph_id):
""" Implement GVDocBase.start_subgraph() """
self.write(
' subgraph cluster_%s\n' % graph_id +
' {\n' +
' style="invis";\n' # no border around subgraph (#0002176)
)
def end_subgraph(self):
""" Implement GVDocBase.end_subgraph() """
self.write(' }\n')
#-------------------------------------------------------------------------------
#
# GVDotDoc
#
#-------------------------------------------------------------------------------
class GVDotDoc(GVDocBase):
""" GVDoc implementation that generates a .gv text file. """
def close(self):
""" Implements GVDotDoc.close() """
GVDocBase.close(self)
# Make sure the extension is correct
if self._filename[-3:] != ".gv":
self._filename += ".gv"
if sys.version_info[0] < 3:
dotfile = open(self._filename, "w")
else:
dotfile = open(self._filename, "wb")
dotfile.write(self._dot.getvalue())
dotfile.close()
#-------------------------------------------------------------------------------
#
# GVPsDoc
#
#-------------------------------------------------------------------------------
class GVPsDoc(GVDocBase):
""" GVDoc implementation that generates a .ps file using Graphviz. """
def __init__(self, options, paper_style):
# DPI must always be 72 for PDF.
# GV documentation says dpi is only for image formats.
options.menu.get_option_by_name('dpi').set_value(72)
        # GV documentation allows multiple pages only for the ps format,
        # but it does not work with -Tps:cairo, which is needed to
        # show Non Latin-1 letters. Force to only 1 page.
        # See bug tracker issue 2815
        options.menu.get_option_by_name('v_pages').set_value(1)
        options.menu.get_option_by_name('h_pages').set_value(1)
        GVDocBase.__init__(self, options, paper_style)
def close(self):
""" Implements GVPsDoc.close() """
GVDocBase.close(self)
# Make sure the extension is correct
if self._filename[-3:] != ".ps":
self._filename += ".ps"
# Create a temporary dot file
(handle, tmp_dot) = tempfile.mkstemp(".gv" )
if sys.version_info[0] < 3:
dotfile = os.fdopen(handle, "w")
else:
dotfile = os.fdopen(handle, "wb")
dotfile.write(self._dot.getvalue())
dotfile.close()
# Generate the PS file.
# Reason for using -Tps:cairo. Needed for Non Latin-1 letters
        # Some testing with -Tps:cairo. Non Latin-1 letters are OK in all cases:
        # Output format:     ps         PDF-Ghostscript  PDF-GraphViz
        # Single page        OK         OK               OK
        # Multiple pages     1 page,    OK               1 page,
        #                    corrupted                   set by gramps
        # If I take a correct multiple-page PDF and convert it with pdf2ps I get
        # multiple pages, but the output is clipped, some margins have
        # disappeared. I used 1 inch margins always.
# See bug tracker issue 2815
        # :cairo does not work with Graphviz 2.26.3 and later. See issue 4164
        # Convert filename to str using file system encoding.
if sys.version_info[0] < 3:
fname = self._filename.encode(glocale.getfilesystemencoding())
else:
fname = self._filename
command = 'dot -Tps:cairo -o"%s" "%s"' % (fname, tmp_dot)
dotversion = str(Popen(['dot', '-V'], stderr=PIPE).communicate(input=None)[1])
        # Problem with dot 2.26.3 and later and multiple pages, which gives
        # "cairo: out of memory". If the :cairo is skipped for these cases it
        # gives an acceptable result.
        if (dotversion.find('2.26.3') != -1 or dotversion.find('2.28.0') != -1) and (self.vpages * self.hpages) > 1:
command = command.replace(':cairo','')
os.system(command)
# Delete the temporary dot file
os.remove(tmp_dot)
#-------------------------------------------------------------------------------
#
# GVSvgDoc
#
#-------------------------------------------------------------------------------
class GVSvgDoc(GVDocBase):
""" GVDoc implementation that generates a .svg file using Graphviz. """
def __init__(self, options, paper_style):
        # GV documentation allows multiple pages only for the ps format,
# which also includes pdf via ghostscript.
options.menu.get_option_by_name('v_pages').set_value(1)
options.menu.get_option_by_name('h_pages').set_value(1)
GVDocBase.__init__(self, options, paper_style)
def close(self):
""" Implements GVSvgDoc.close() """
GVDocBase.close(self)
# Make sure the extension is correct
if self._filename[-4:] != ".svg":
self._filename += ".svg"
# Create a temporary dot file
(handle, tmp_dot) = tempfile.mkstemp(".gv" )
if sys.version_info[0] < 3:
dotfile = os.fdopen(handle, "w")
else:
dotfile = os.fdopen(handle, "wb")
dotfile.write(self._dot.getvalue())
dotfile.close()
        # Convert filename to str using file system encoding.
if sys.version_info[0] < 3:
fname = self._filename.encode(glocale.getfilesystemencoding())
else:
fname = self._filename
# Generate the SVG file.
os.system( 'dot -Tsvg -o"%s" "%s"' % (fname, tmp_dot) )
# Delete the temporary dot file
os.remove(tmp_dot)
#-------------------------------------------------------------------------------
#
# GVSvgzDoc
#
#-------------------------------------------------------------------------------
class GVSvgzDoc(GVDocBase):
""" GVDoc implementation that generates a .svg file using Graphviz. """
def __init__(self, options, paper_style):
        # GV documentation allows multiple pages only for the ps format,
# which also includes pdf via ghostscript.
options.menu.get_option_by_name('v_pages').set_value(1)
options.menu.get_option_by_name('h_pages').set_value(1)
GVDocBase.__init__(self, options, paper_style)
def close(self):
""" Implements GVSvgzDoc.close() """
GVDocBase.close(self)
# Make sure the extension is correct
if self._filename[-5:] != ".svgz":
self._filename += ".svgz"
# Create a temporary dot file
(handle, tmp_dot) = tempfile.mkstemp(".gv" )
if sys.version_info[0] < 3:
dotfile = os.fdopen(handle, "w")
else:
dotfile = os.fdopen(handle, "wb")
dotfile.write(self._dot.getvalue())
dotfile.close()
        # Convert filename to str using file system encoding.
if sys.version_info[0] < 3:
fname = self._filename.encode(glocale.getfilesystemencoding())
else:
fname = self._filename
# Generate the SVGZ file.
os.system( 'dot -Tsvgz -o"%s" "%s"' % (fname, tmp_dot) )
# Delete the temporary dot file
os.remove(tmp_dot)
#-------------------------------------------------------------------------------
#
# GVPngDoc
#
#-------------------------------------------------------------------------------
class GVPngDoc(GVDocBase):
""" GVDoc implementation that generates a .png file using Graphviz. """
def __init__(self, options, paper_style):
        # GV documentation allows multiple pages only for the ps format,
# which also includes pdf via ghostscript.
options.menu.get_option_by_name('v_pages').set_value(1)
options.menu.get_option_by_name('h_pages').set_value(1)
GVDocBase.__init__(self, options, paper_style)
def close(self):
""" Implements GVPngDoc.close() """
GVDocBase.close(self)
# Make sure the extension is correct
if self._filename[-4:] != ".png":
self._filename += ".png"
# Create a temporary dot file
(handle, tmp_dot) = tempfile.mkstemp(".gv" )
if sys.version_info[0] < 3:
dotfile = os.fdopen(handle, "w")
else:
dotfile = os.fdopen(handle, "wb")
dotfile.write(self._dot.getvalue())
dotfile.close()
        # Convert filename to str using file system encoding.
if sys.version_info[0] < 3:
fname = self._filename.encode(glocale.getfilesystemencoding())
else:
fname = self._filename
# Generate the PNG file.
os.system( 'dot -Tpng -o"%s" "%s"' % (fname, tmp_dot) )
# Delete the temporary dot file
os.remove(tmp_dot)
#-------------------------------------------------------------------------------
#
# GVJpegDoc
#
#-------------------------------------------------------------------------------
class GVJpegDoc(GVDocBase):
""" GVDoc implementation that generates a .jpg file using Graphviz. """
def __init__(self, options, paper_style):
        # GV documentation allows multiple pages only for the ps format,
# which also includes pdf via ghostscript.
options.menu.get_option_by_name('v_pages').set_value(1)
options.menu.get_option_by_name('h_pages').set_value(1)
GVDocBase.__init__(self, options, paper_style)
def close(self):
""" Implements GVJpegDoc.close() """
GVDocBase.close(self)
# Make sure the extension is correct
if self._filename[-4:] != ".jpg":
self._filename += ".jpg"
# Create a temporary dot file
(handle, tmp_dot) = tempfile.mkstemp(".gv" )
if sys.version_info[0] < 3:
dotfile = os.fdopen(handle, "w")
else:
dotfile = os.fdopen(handle, "wb")
dotfile.write(self._dot.getvalue())
dotfile.close()
        # Convert filename to str using file system encoding.
if sys.version_info[0] < 3:
fname = self._filename.encode(glocale.getfilesystemencoding())
else:
fname = self._filename
# Generate the JPEG file.
os.system( 'dot -Tjpg -o"%s" "%s"' % (fname, tmp_dot) )
# Delete the temporary dot file
os.remove(tmp_dot)
#-------------------------------------------------------------------------------
#
# GVGifDoc
#
#-------------------------------------------------------------------------------
class GVGifDoc(GVDocBase):
""" GVDoc implementation that generates a .gif file using Graphviz. """
def __init__(self, options, paper_style):
        # GV documentation allows multiple pages only for the ps format,
# which also includes pdf via ghostscript.
options.menu.get_option_by_name('v_pages').set_value(1)
options.menu.get_option_by_name('h_pages').set_value(1)
GVDocBase.__init__(self, options, paper_style)
def close(self):
""" Implements GVGifDoc.close() """
GVDocBase.close(self)
# Make sure the extension is correct
if self._filename[-4:] != ".gif":
self._filename += ".gif"
# Create a temporary dot file
(handle, tmp_dot) = tempfile.mkstemp(".gv" )
if sys.version_info[0] < 3:
dotfile = os.fdopen(handle, "w")
else:
dotfile = os.fdopen(handle, "wb")
dotfile.write(self._dot.getvalue())
dotfile.close()
        # Convert filename to str using file system encoding.
if sys.version_info[0] < 3:
fname = self._filename.encode(glocale.getfilesystemencoding())
else:
fname = self._filename
# Generate the GIF file.
os.system( 'dot -Tgif -o"%s" "%s"' % (fname, tmp_dot) )
# Delete the temporary dot file
os.remove(tmp_dot)
#-------------------------------------------------------------------------------
#
# GVPdfGvDoc
#
#-------------------------------------------------------------------------------
class GVPdfGvDoc(GVDocBase):
""" GVDoc implementation that generates a .pdf file using Graphviz. """
def __init__(self, options, paper_style):
# DPI must always be 72 for PDF.
# GV documentation says dpi is only for image formats.
options.menu.get_option_by_name('dpi').set_value(72)
        # GV documentation allows multiple pages only for the ps format,
# which also includes pdf via ghostscript.
options.menu.get_option_by_name('v_pages').set_value(1)
options.menu.get_option_by_name('h_pages').set_value(1)
GVDocBase.__init__(self, options, paper_style)
def close(self):
""" Implements GVPdfGvDoc.close() """
GVDocBase.close(self)
# Make sure the extension is correct
if self._filename[-4:] != ".pdf":
self._filename += ".pdf"
# Create a temporary dot file
(handle, tmp_dot) = tempfile.mkstemp(".gv" )
if sys.version_info[0] < 3:
dotfile = os.fdopen(handle, "w")
else:
dotfile = os.fdopen(handle, "wb")
dotfile.write(self._dot.getvalue())
dotfile.close()
# Convert filename to str using file system encoding.
if sys.version_info[0] < 3:
fname = self._filename.encode(glocale.getfilesystemencoding())
else:
fname = self._filename
# Generate the PDF file.
os.system( 'dot -Tpdf -o"%s" "%s"' % (fname, tmp_dot) )
# Delete the temporary dot file
os.remove(tmp_dot)
#-------------------------------------------------------------------------------
#
# GVPdfGsDoc
#
#-------------------------------------------------------------------------------
class GVPdfGsDoc(GVDocBase):
""" GVDoc implementation that generates a .pdf file using Ghostscript. """
def __init__(self, options, paper_style):
# DPI must always be 72 for PDF.
# GV documentation says dpi is only for image formats.
options.menu.get_option_by_name('dpi').set_value(72)
GVDocBase.__init__(self, options, paper_style)
def close(self):
""" Implements GVPdfGsDoc.close() """
GVDocBase.close(self)
# Make sure the extension is correct
if self._filename[-4:] != ".pdf":
self._filename += ".pdf"
# Create a temporary dot file
(handle, tmp_dot) = tempfile.mkstemp(".gv" )
if sys.version_info[0] < 3:
dotfile = os.fdopen(handle, "w")
else:
dotfile = os.fdopen(handle, "wb")
dotfile.write(self._dot.getvalue())
dotfile.close()
# Create a temporary PostScript file
(handle, tmp_ps) = tempfile.mkstemp(".ps" )
os.close( handle )
# Generate PostScript using dot
# Reason for using -Tps:cairo. Needed for Non Latin-1 letters
# See bug tracker issue 2815
        # :cairo does not work with Graphviz 2.26.3 and later. See issue 4164
command = 'dot -Tps:cairo -o"%s" "%s"' % ( tmp_ps, tmp_dot )
dotversion = str(Popen(['dot', '-V'], stderr=PIPE).communicate(input=None)[1])
        # Problem with dot 2.26.3 and later and multiple pages, which gives
        # "cairo: out of memory". If the :cairo is skipped for these cases it
        # gives an acceptable result.
        if (dotversion.find('2.26.3') != -1 or dotversion.find('2.28.0') != -1) and (self.vpages * self.hpages) > 1:
command = command.replace(':cairo','')
os.system(command)
# Add .5 to remove rounding errors.
paper_size = self._paper.get_size()
width_pt = int( (paper_size.get_width_inches() * 72) + 0.5 )
height_pt = int( (paper_size.get_height_inches() * 72) + 0.5 )
# Convert to PDF using ghostscript
if sys.version_info[0] < 3:
fname = self._filename.encode(glocale.getfilesystemencoding())
else:
fname = self._filename
command = '%s -q -sDEVICE=pdfwrite -dNOPAUSE -dDEVICEWIDTHPOINTS=%d' \
' -dDEVICEHEIGHTPOINTS=%d -sOutputFile="%s" "%s" -c quit' \
% ( _GS_CMD, width_pt, height_pt, fname, tmp_ps )
os.system(command)
os.remove(tmp_ps)
os.remove(tmp_dot)
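        # Illustrative expansion of the two commands built above (the temp
        # paths and the US-Letter point sizes are hypothetical):
        #   dot -Tps:cairo -o"/tmp/tmpXXXX.ps" "/tmp/tmpXXXX.gv"
        #   gs -q -sDEVICE=pdfwrite -dNOPAUSE -dDEVICEWIDTHPOINTS=612 \
        #      -dDEVICEHEIGHTPOINTS=792 -sOutputFile="out.pdf" "/tmp/tmpXXXX.ps" -c quit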
#-------------------------------------------------------------------------------
#
# Various Graphviz formats.
#
#-------------------------------------------------------------------------------
FORMATS = []
if _DOT_FOUND:
if _GS_CMD != "":
FORMATS += [{ 'type' : "gspdf",
'ext' : "pdf",
'descr': _("PDF (Ghostscript)"),
'mime' : "application/pdf",
'class': GVPdfGsDoc }]
FORMATS += [{ 'type' : "gvpdf",
'ext' : "pdf",
'descr': _("PDF (Graphviz)"),
'mime' : "application/pdf",
'class': GVPdfGvDoc }]
FORMATS += [{ 'type' : "ps",
'ext' : "ps",
'descr': _("PostScript"),
'mime' : "application/postscript",
'class': GVPsDoc }]
FORMATS += [{ 'type' : "svg",
'ext' : "svg",
'descr': _("Structured Vector Graphics (SVG)"),
'mime' : "image/svg",
'class': GVSvgDoc }]
FORMATS += [{ 'type' : "svgz",
'ext' : "svgz",
'descr': _("Compressed Structured Vector Graphs (SVGZ)"),
'mime' : "image/svgz",
'class': GVSvgzDoc }]
FORMATS += [{ 'type' : "jpg",
'ext' : "jpg",
'descr': _("JPEG image"),
'mime' : "image/jpeg",
'class': GVJpegDoc }]
FORMATS += [{ 'type' : "gif",
'ext' : "gif",
'descr': _("GIF image"),
'mime' : "image/gif",
'class': GVGifDoc }]
FORMATS += [{ 'type' : "png",
'ext' : "png",
'descr': _("PNG image"),
'mime' : "image/png",
'class': GVPngDoc }]
FORMATS += [{ 'type' : "dot",
'ext' : "gv",
'descr': _("Graphviz File"),
'mime' : "text/x-graphviz",
'class': GVDotDoc }]
| gpl-2.0 |
SmithsonianEnterprises/django-cms | cms/tests/test_permissions.py | 50 | 3054 | # -*- coding: utf-8 -*-
from django.contrib.sites.models import Site
from django.test.utils import override_settings
from cms.models import Page
from cms.api import create_page, assign_user_to_page
from cms.cache.permissions import (get_permission_cache, set_permission_cache,
clear_user_permission_cache)
from cms.test_utils.testcases import CMSTestCase
@override_settings(CMS_PERMISSION=True)
class PermissionCacheTests(CMSTestCase):
def setUp(self):
self.user_super = self._create_user("super", is_staff=True,
is_superuser=True)
self.user_normal = self._create_user("randomuser", is_staff=True,
add_default_permissions=True)
self.home_page = create_page("home", "nav_playground.html", "en",
created_by=self.user_super)
def test_basic_permissions(self):
"""
Test basic permissions cache get / set / clear low-level api
"""
cached_permissions = get_permission_cache(self.user_normal, "can_change")
self.assertIsNone(cached_permissions)
set_permission_cache(self.user_normal, "can_change", [self.home_page.id])
cached_permissions = get_permission_cache(self.user_normal, "can_change")
self.assertEqual(cached_permissions, [self.home_page.id])
clear_user_permission_cache(self.user_normal)
cached_permissions = get_permission_cache(self.user_normal, "can_change")
self.assertIsNone(cached_permissions)
def test_cache_invalidation(self):
"""
Test permission cache clearing on page save
"""
set_permission_cache(self.user_normal, "can_change", [self.home_page.id])
self.home_page.save()
cached_permissions = get_permission_cache(self.user_normal, "can_change")
self.assertIsNone(cached_permissions)
def test_permission_manager(self):
"""
Test page permission manager working on a subpage
"""
page_b = create_page("page_b", "nav_playground.html", "en",
created_by=self.user_super)
assign_user_to_page(page_b, self.user_normal, can_view=True,
can_change=True)
cached_permissions = get_permission_cache(self.user_normal, "can_change")
self.assertIsNone(cached_permissions)
live_permissions = Page.permissions.get_change_id_list(self.user_normal,
Site.objects.get_current())
cached_permissions_permissions = get_permission_cache(self.user_normal,
"can_change")
self.assertEqual(live_permissions, [page_b.id])
self.assertEqual(cached_permissions_permissions, live_permissions)
self.home_page.save()
cached_permissions = get_permission_cache(self.user_normal, "can_change")
self.assertIsNone(cached_permissions)
| bsd-3-clause |
jrwdunham/old | onlinelinguisticdatabase/tests/functional/test_phonologybackups.py | 1 | 8705 | # Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import codecs
import simplejson as json
from nose.tools import nottest
from onlinelinguisticdatabase.tests import TestController, url
import onlinelinguisticdatabase.model as model
from onlinelinguisticdatabase.model import Phonology
from onlinelinguisticdatabase.model.meta import Session
import onlinelinguisticdatabase.lib.helpers as h
log = logging.getLogger(__name__)
class TestPhonologybackupsController(TestController):
def __init__(self, *args, **kwargs):
TestController.__init__(self, *args, **kwargs)
self.test_phonology_script = h.normalize(
codecs.open(self.test_phonology_script_path, 'r', 'utf8').read())
def tearDown(self):
TestController.tearDown(self, dirs_to_destroy=['phonology'])
@nottest
def test_index(self):
"""Tests that ``GET /phonologybackups`` behaves correctly.
"""
# Define some extra_environs
view = {'test.authentication.role': u'viewer', 'test.application_settings': True}
contrib = {'test.authentication.role': u'contributor', 'test.application_settings': True}
admin = {'test.authentication.role': u'administrator', 'test.application_settings': True}
# Create a phonology.
params = self.phonology_create_params.copy()
params.update({
'name': u'Phonology',
'description': u'Covers a lot of the data.',
'script': self.test_phonology_script
})
params = json.dumps(params)
response = self.app.post(url('phonologies'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
phonology_count = Session.query(Phonology).count()
phonology_dir = os.path.join(self.phonologies_path, 'phonology_%d' % resp['id'])
phonology_dir_contents = os.listdir(phonology_dir)
phonology_id = resp['id']
assert phonology_count == 1
assert resp['name'] == u'Phonology'
assert resp['description'] == u'Covers a lot of the data.'
assert 'phonology.script' in phonology_dir_contents
assert response.content_type == 'application/json'
assert resp['script'] == self.test_phonology_script
# Update the phonology as the admin to create a phonology backup.
params = self.phonology_create_params.copy()
params.update({
'name': u'Phonology Renamed',
'description': u'Covers a lot of the data.',
'script': self.test_phonology_script
})
params = json.dumps(params)
response = self.app.put(url('phonology', id=phonology_id), params,
self.json_headers, admin)
resp = json.loads(response.body)
phonology_count = Session.query(model.Phonology).count()
assert response.content_type == 'application/json'
assert phonology_count == 1
        # Now update the phonology as the default contributor to create a second backup.
params = self.phonology_create_params.copy()
params.update({
'name': u'Phonology Renamed by Contributor',
'description': u'Covers a lot of the data.',
'script': self.test_phonology_script
})
params = json.dumps(params)
response = self.app.put(url('phonology', id=phonology_id), params,
self.json_headers, contrib)
resp = json.loads(response.body)
phonology_count = Session.query(model.Phonology).count()
assert phonology_count == 1
# Now GET the phonology backups (as the viewer).
response = self.app.get(url('phonologybackups'), headers=self.json_headers,
extra_environ=view)
resp = json.loads(response.body)
assert len(resp) == 2
assert response.content_type == 'application/json'
# Now update the phonology.
params = self.phonology_create_params.copy()
params.update({
'name': u'Phonology Updated',
'description': u'Covers a lot of the data.',
'script': self.test_phonology_script
})
params = json.dumps(params)
response = self.app.put(url('phonology', id=phonology_id), params,
self.json_headers, contrib)
resp = json.loads(response.body)
phonology_count = Session.query(model.Phonology).count()
assert phonology_count == 1
        # Now GET the phonology backups again. All three backups should be
        # returned.
response = self.app.get(url('phonologybackups'), headers=self.json_headers,
extra_environ=contrib)
resp = json.loads(response.body)
all_phonology_backups = resp
assert len(resp) == 3
# Test the paginator GET params.
paginator = {'items_per_page': 1, 'page': 2}
response = self.app.get(url('phonologybackups'), paginator,
headers=self.json_headers, extra_environ=admin)
resp = json.loads(response.body)
assert len(resp['items']) == 1
assert resp['items'][0]['name'] == all_phonology_backups[1]['name']
assert response.content_type == 'application/json'
# Test the order_by GET params.
order_by_params = {'order_by_model': 'PhonologyBackup', 'order_by_attribute': 'datetime_modified',
'order_by_direction': 'desc'}
response = self.app.get(url('phonologybackups'), order_by_params,
headers=self.json_headers, extra_environ=admin)
resp = json.loads(response.body)
result_set = sorted(all_phonology_backups, key=lambda pb: pb['datetime_modified'], reverse=True)
assert [pb['id'] for pb in resp] == [pb['id'] for pb in result_set]
# Test the order_by *with* paginator.
params = {'order_by_model': 'PhonologyBackup', 'order_by_attribute': 'datetime_modified',
'order_by_direction': 'desc', 'items_per_page': 1, 'page': 3}
response = self.app.get(url('phonologybackups'), params,
headers=self.json_headers, extra_environ=admin)
resp = json.loads(response.body)
assert result_set[2]['name'] == resp['items'][0]['name']
# Now test the show action:
# Get a particular phonology backup
response = self.app.get(url('phonologybackup', id=all_phonology_backups[0]['id']),
headers=self.json_headers, extra_environ=admin)
resp = json.loads(response.body)
assert resp['name'] == all_phonology_backups[0]['name']
assert response.content_type == 'application/json'
# A nonexistent pb id will return a 404 error
response = self.app.get(url('phonologybackup', id=100987),
headers=self.json_headers, extra_environ=view, status=404)
resp = json.loads(response.body)
assert resp['error'] == u'There is no phonology backup with id 100987'
assert response.content_type == 'application/json'
# Attempting to call edit/new/create/delete/update on a read-only resource
# will return a 404 response
response = self.app.get(url('edit_phonologybackup', id=2232), status=404)
assert json.loads(response.body)['error'] == u'This resource is read-only.'
response = self.app.get(url('new_phonologybackup', id=2232), status=404)
assert json.loads(response.body)['error'] == u'This resource is read-only.'
response = self.app.post(url('phonologybackups'), status=404)
assert json.loads(response.body)['error'] == u'This resource is read-only.'
response = self.app.put(url('phonologybackup', id=2232), status=404)
assert json.loads(response.body)['error'] == u'This resource is read-only.'
response = self.app.delete(url('phonologybackup', id=2232), status=404)
assert json.loads(response.body)['error'] == u'This resource is read-only.'
assert response.content_type == 'application/json'
| apache-2.0 |
hmightypirate/guided-backprop-chainerrl | examples/mygym/guided_relu.py | 1 | 2720 | import numpy
import chainer
from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
_mode = cudnn.cudnn.CUDNN_ACTIVATION_RELU
class GuidedReLU(function.Function):
"""Rectified Linear Unit."""
# TODO(beam2d): Implement in-place version.
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1,
in_types[0].dtype.kind == 'f',
)
def forward_cpu(self, x):
self.retain_inputs(())
self.retain_outputs((0,))
return utils.force_array(numpy.maximum(x[0], 0, dtype=x[0].dtype)),
def forward_gpu(self, x):
if chainer.should_use_cudnn('==always') and x[0].flags.c_contiguous:
self._use_cudnn = True
y = cudnn.activation_forward(x[0], _mode)
else:
self.retain_inputs(())
self._use_cudnn = False
y = cuda.cupy.maximum(x[0], 0)
self.retain_outputs((0,))
return y,
def backward_cpu(self, x, gy):
y = self.output_data[0]
# Guided relu
if not chainer.config.train:
return utils.force_array(gy[0] * (y > 0) * (gy[0] > 0)),
else:
# Whilst training it behaves as a standard relu
return utils.force_array(gy[0] * (y > 0)),
def backward_gpu(self, x, gy):
y = self.output_data[0]
# if chainer.should_use_cudnn('==always') and self._use_cudnn:
# gx = cudnn.activation_backward(x[0], y, gy[0], _mode)
# else:
if not chainer.config.train:
gx = cuda.elementwise(
'T y, T gy', 'T gx',
'gx = y > 0 & gy > 0 ? gy : (T)0',
'guided_relu_bwd')(y, gy[0])
else:
gx = cuda.elementwise(
'T y, T gy', 'T gx',
'gx = y > 0 ? gy : (T)0',
'relu_bwd')(y, gy[0])
return gx,
def guided_relu(x):
"""Rectified Linear Unit function with guided backpropagation.
.. math:: f(x)=\\max(0, x).
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
>>> x = np.array([[-1, 0], [2, -3], [-2, 1]], 'f')
>>> np.any(x < 0)
True
        >>> y = guided_relu(x)
>>> np.any(y.data < 0)
False
>>> y.shape
(3, 2)
"""
return GuidedReLU()(x)
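# Minimal usage sketch (illustrative; not part of the original module). In
# evaluation mode the backward pass above keeps a gradient only where both
# the forward activation and the incoming gradient are positive.
if __name__ == '__main__':
    x = chainer.Variable(
        numpy.array([[-1.0, 2.0], [3.0, -4.0]], dtype=numpy.float32))
    with chainer.using_config('train', False):
        y = guided_relu(x)
        y.grad = numpy.array([[1.0, -1.0], [1.0, 1.0]], dtype=numpy.float32)
        y.backward()
    # Only x[1][0] has both x > 0 and an upstream gradient > 0, so x.grad
    # comes out as [[0, 0], [1, 0]].
    print(x.grad)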
| apache-2.0 |
atmark-techno/atmark-dist | user/python/Lib/plat-irix5/TERMIOS.py | 4 | 10049 | # Generated by h2py from /usr/include/sys/termios.h
# Included from sys/ttydev.h
B0 = 0
B50 = 0000001
B75 = 0000002
B110 = 0000003
B134 = 0000004
B150 = 0000005
B200 = 0000006
B300 = 0000007
B600 = 0000010
B1200 = 0000011
B1800 = 0000012
B2400 = 0000013
B4800 = 0000014
B9600 = 0000015
B19200 = 0000016
EXTA = 0000016
B38400 = 0000017
EXTB = 0000017
# Included from sys/types.h
# Included from sgidefs.h
_MIPS_ISA_MIPS1 = 1
_MIPS_ISA_MIPS2 = 2
_MIPS_ISA_MIPS3 = 3
_MIPS_ISA_MIPS4 = 4
_MIPS_SIM_ABI32 = 1
_MIPS_SIM_NABI32 = 2
_MIPS_SIM_ABI64 = 3
P_MYID = (-1)
P_MYHOSTID = (-1)
# Included from sys/bsd_types.h
# Included from sys/mkdev.h
ONBITSMAJOR = 7
ONBITSMINOR = 8
OMAXMAJ = 0x7f
OMAXMIN = 0xff
NBITSMAJOR = 14
NBITSMINOR = 18
MAXMAJ = 0x1ff
MAXMIN = 0x3ffff
OLDDEV = 0
NEWDEV = 1
MKDEV_VER = NEWDEV
def major(dev): return __major(MKDEV_VER, dev)
def minor(dev): return __minor(MKDEV_VER, dev)
# Included from sys/select.h
FD_SETSIZE = 1024
NBBY = 8
_POSIX_VDISABLE = 0
def CTRL(c): return ((c)&037)
IBSHIFT = 16
NCC = 8
NCCS = 23
VINTR = 0
VQUIT = 1
VERASE = 2
VKILL = 3
VEOF = 4
VEOL = 5
VEOL2 = 6
VMIN = 4
VTIME = 5
VSWTCH = 7
VSTART = 8
VSTOP = 9
VSUSP = 10
VDSUSP = 11
VREPRINT = 12
VDISCARD = 13
VWERASE = 14
VLNEXT = 15
VRPRNT = VREPRINT
VFLUSHO = VDISCARD
VCEOF = NCC
VCEOL = (NCC + 1)
CNUL = 0
CDEL = 0377
CESC = ord('\\')
CINTR = 0177
CQUIT = 034
CERASE = CTRL(ord('H'))
CKILL = CTRL(ord('U'))
CEOL = 0
CEOL2 = 0
CEOF = CTRL(ord('d'))
CEOT = CEOF
CSTART = CTRL(ord('q'))
CSTOP = CTRL(ord('s'))
CSWTCH = CTRL(ord('z'))
CNSWTCH = 0
CSUSP = CSWTCH
CLNEXT = CTRL(ord('v'))
CWERASE = CTRL(ord('w'))
CFLUSHO = CTRL(ord('o'))
CFLUSH = CFLUSHO
CRPRNT = CTRL(ord('r'))
CDSUSP = CTRL(ord('y'))
CBRK = 0377
IGNBRK = 0000001
BRKINT = 0000002
IGNPAR = 0000004
PARMRK = 0000010
INPCK = 0000020
ISTRIP = 0000040
INLCR = 0000100
IGNCR = 0000200
ICRNL = 0000400
IUCLC = 0001000
IXON = 0002000
IXANY = 0004000
IXOFF = 0010000
IMAXBEL = 0020000
IBLKMD = 0040000
OPOST = 0000001
OLCUC = 0000002
ONLCR = 0000004
OCRNL = 0000010
ONOCR = 0000020
ONLRET = 0000040
OFILL = 0000100
OFDEL = 0000200
NLDLY = 0000400
NL0 = 0
NL1 = 0000400
CRDLY = 0003000
CR0 = 0
CR1 = 0001000
CR2 = 0002000
CR3 = 0003000
TABDLY = 0014000
TAB0 = 0
TAB1 = 0004000
TAB2 = 0010000
TAB3 = 0014000
XTABS = 0014000
BSDLY = 0020000
BS0 = 0
BS1 = 0020000
VTDLY = 0040000
VT0 = 0
VT1 = 0040000
FFDLY = 0100000
FF0 = 0
FF1 = 0100000
PAGEOUT = 0200000
WRAP = 0400000
CBAUD = 000000017
CSIZE = 000000060
CS5 = 0
CS6 = 000000020
CS7 = 000000040
CS8 = 000000060
CSTOPB = 000000100
CREAD = 000000200
PARENB = 000000400
PARODD = 000001000
HUPCL = 000002000
CLOCAL = 000004000
RCV1EN = 000010000
XMT1EN = 000020000
LOBLK = 000040000
XCLUDE = 000100000
CIBAUD = 003600000
PAREXT = 004000000
CNEW_RTSCTS = 010000000
ISIG = 0000001
ICANON = 0000002
XCASE = 0000004
ECHO = 0000010
ECHOE = 0000020
ECHOK = 0000040
ECHONL = 0000100
NOFLSH = 0000200
IEXTEN = 0000400
ITOSTOP = 0100000
TOSTOP = ITOSTOP
ECHOCTL = 0001000
ECHOPRT = 0002000
ECHOKE = 0004000
DEFECHO = 0010000
FLUSHO = 0020000
PENDIN = 0040000
TIOC = (ord('T')<<8)
TCGETA = (TIOC|1)
TCSETA = (TIOC|2)
TCSETAW = (TIOC|3)
TCSETAF = (TIOC|4)
TCSBRK = (TIOC|5)
TCXONC = (TIOC|6)
TCFLSH = (TIOC|7)
# Included from sys/ioctl.h
IOCTYPE = 0xff00
LIOC = (ord('l')<<8)
LIOCGETP = (LIOC|1)
LIOCSETP = (LIOC|2)
LIOCGETS = (LIOC|5)
LIOCSETS = (LIOC|6)
DIOC = (ord('d')<<8)
DIOCGETC = (DIOC|1)
DIOCGETB = (DIOC|2)
DIOCSETE = (DIOC|3)
# Included from sys/ioccom.h
IOCPARM_MASK = 0xff
IOC_VOID = 0x20000000
IOC_OUT = 0x40000000
IOC_IN = 0x80000000
IOC_INOUT = (IOC_IN|IOC_OUT)
# Included from net/soioctl.h
# Included from sys/termio.h
# Included from sys/termios.h
_POSIX_VDISABLE = 0
def CTRL(c): return ((c)&037)
IBSHIFT = 16
NCC = 8
NCCS = 23
VINTR = 0
VQUIT = 1
VERASE = 2
VKILL = 3
VEOF = 4
VEOL = 5
VEOL2 = 6
VMIN = 4
VTIME = 5
VSWTCH = 7
VSTART = 8
VSTOP = 9
VSUSP = 10
VDSUSP = 11
VREPRINT = 12
VDISCARD = 13
VWERASE = 14
VLNEXT = 15
VRPRNT = VREPRINT
VFLUSHO = VDISCARD
VCEOF = NCC
VCEOL = (NCC + 1)
CNUL = 0
CDEL = 0377
CESC = ord('\\')
CINTR = 0177
CQUIT = 034
CERASE = CTRL(ord('H'))
CKILL = CTRL(ord('U'))
CEOL = 0
CEOL2 = 0
CEOF = CTRL(ord('d'))
CEOT = CEOF
CSTART = CTRL(ord('q'))
CSTOP = CTRL(ord('s'))
CSWTCH = CTRL(ord('z'))
CNSWTCH = 0
CSUSP = CSWTCH
CLNEXT = CTRL(ord('v'))
CWERASE = CTRL(ord('w'))
CFLUSHO = CTRL(ord('o'))
CFLUSH = CFLUSHO
CRPRNT = CTRL(ord('r'))
CDSUSP = CTRL(ord('y'))
CBRK = 0377
IGNBRK = 0000001
BRKINT = 0000002
IGNPAR = 0000004
PARMRK = 0000010
INPCK = 0000020
ISTRIP = 0000040
INLCR = 0000100
IGNCR = 0000200
ICRNL = 0000400
IUCLC = 0001000
IXON = 0002000
IXANY = 0004000
IXOFF = 0010000
IMAXBEL = 0020000
IBLKMD = 0040000
OPOST = 0000001
OLCUC = 0000002
ONLCR = 0000004
OCRNL = 0000010
ONOCR = 0000020
ONLRET = 0000040
OFILL = 0000100
OFDEL = 0000200
NLDLY = 0000400
NL0 = 0
NL1 = 0000400
CRDLY = 0003000
CR0 = 0
CR1 = 0001000
CR2 = 0002000
CR3 = 0003000
TABDLY = 0014000
TAB0 = 0
TAB1 = 0004000
TAB2 = 0010000
TAB3 = 0014000
XTABS = 0014000
BSDLY = 0020000
BS0 = 0
BS1 = 0020000
VTDLY = 0040000
VT0 = 0
VT1 = 0040000
FFDLY = 0100000
FF0 = 0
FF1 = 0100000
PAGEOUT = 0200000
WRAP = 0400000
CBAUD = 000000017
CSIZE = 000000060
CS5 = 0
CS6 = 000000020
CS7 = 000000040
CS8 = 000000060
CSTOPB = 000000100
CREAD = 000000200
PARENB = 000000400
PARODD = 000001000
HUPCL = 000002000
CLOCAL = 000004000
RCV1EN = 000010000
XMT1EN = 000020000
LOBLK = 000040000
XCLUDE = 000100000
CIBAUD = 003600000
PAREXT = 004000000
CNEW_RTSCTS = 010000000
ISIG = 0000001
ICANON = 0000002
XCASE = 0000004
ECHO = 0000010
ECHOE = 0000020
ECHOK = 0000040
ECHONL = 0000100
NOFLSH = 0000200
IEXTEN = 0000400
ITOSTOP = 0100000
TOSTOP = ITOSTOP
ECHOCTL = 0001000
ECHOPRT = 0002000
ECHOKE = 0004000
DEFECHO = 0010000
FLUSHO = 0020000
PENDIN = 0040000
TIOC = (ord('T')<<8)
TCGETA = (TIOC|1)
TCSETA = (TIOC|2)
TCSETAW = (TIOC|3)
TCSETAF = (TIOC|4)
TCSBRK = (TIOC|5)
TCXONC = (TIOC|6)
TCFLSH = (TIOC|7)
LDISC0 = 0
LDISC1 = 1
NTTYDISC = LDISC1
TIOCFLUSH = (TIOC|12)
TCSETLABEL = (TIOC|31)
TCDSET = (TIOC|32)
TCBLKMD = (TIOC|33)
TIOCPKT = (TIOC|112)
TIOCPKT_DATA = 0x00
TIOCPKT_FLUSHREAD = 0x01
TIOCPKT_FLUSHWRITE = 0x02
TIOCPKT_NOSTOP = 0x10
TIOCPKT_DOSTOP = 0x20
TIOCPKT_IOCTL = 0x40
TIOCNOTTY = (TIOC|113)
TIOCSTI = (TIOC|114)
TFIOC = (ord('F')<<8)
oFIONREAD = (TFIOC|127)
TO_STOP = LOBLK
IOCTYPE = 0xff00
TCGETS = (TIOC|13)
TCSETS = (TIOC|14)
TCSETSW = (TIOC|15)
TCSETSF = (TIOC|16)
TCSANOW = ((ord('T')<<8)|14)
TCSADRAIN = ((ord('T')<<8)|15)
TCSAFLUSH = ((ord('T')<<8)|16)
TCIFLUSH = 0
TCOFLUSH = 1
TCIOFLUSH = 2
TCOOFF = 0
TCOON = 1
TCIOFF = 2
TCION = 3
tIOC = (ord('t')<<8)
TIOCGETD = (tIOC|0)
TIOCSETD = (tIOC|1)
TIOCHPCL = (tIOC|2)
TIOCGETP = (tIOC|8)
TIOCSETP = (tIOC|9)
TIOCSETN = (tIOC|10)
TIOCEXCL = (tIOC|13)
TIOCNXCL = (tIOC|14)
TIOCSETC = (tIOC|17)
TIOCGETC = (tIOC|18)
TIOCLBIS = (tIOC|127)
TIOCLBIC = (tIOC|126)
TIOCLSET = (tIOC|125)
TIOCLGET = (tIOC|124)
TIOCSBRK = (tIOC|123)
TIOCCBRK = (tIOC|122)
TIOCSDTR = (tIOC|121)
TIOCCDTR = (tIOC|120)
TIOCSLTC = (tIOC|117)
TIOCGLTC = (tIOC|116)
TIOCOUTQ = (tIOC|115)
TIOCSTOP = (tIOC|111)
TIOCSTART = (tIOC|110)
TIOCGSID = (tIOC|22)
TIOCSSID = (tIOC|24)
TIOCMSET = (tIOC|26)
TIOCMBIS = (tIOC|27)
TIOCMBIC = (tIOC|28)
TIOCMGET = (tIOC|29)
TIOCM_LE = 0001
TIOCM_DTR = 0002
TIOCM_RTS = 0004
TIOCM_ST = 0010
TIOCM_SR = 0020
TIOCM_CTS = 0040
TIOCM_CAR = 0100
TIOCM_CD = TIOCM_CAR
TIOCM_RNG = 0200
TIOCM_RI = TIOCM_RNG
TIOCM_DSR = 0400
TIOCREMOTE = (tIOC|30)
TIOCSIGNAL = (tIOC|31)
ISPTM = ((ord('P')<<8)|1)
UNLKPT = ((ord('P')<<8)|2)
SVR4SOPEN = ((ord('P')<<8)|100)
LDIOC = (ord('D')<<8)
LDOPEN = (LDIOC|0)
LDCLOSE = (LDIOC|1)
LDCHG = (LDIOC|2)
LDGETT = (LDIOC|8)
LDSETT = (LDIOC|9)
LDSMAP = (LDIOC|10)
LDGMAP = (LDIOC|11)
LDNMAP = (LDIOC|12)
DIOC = (ord('d')<<8)
DIOCGETP = (DIOC|8)
DIOCSETP = (DIOC|9)
FIORDCHK = ((ord('f')<<8)|3)
CLNEXT = CTRL(ord('v'))
CWERASE = CTRL(ord('w'))
CFLUSHO = CTRL(ord('o'))
CFLUSH = CFLUSHO
CRPRNT = CTRL(ord('r'))
CDSUSP = CTRL(ord('y'))
SSPEED = B9600
TERM_NONE = 0
TERM_TEC = 1
TERM_V61 = 2
TERM_V10 = 3
TERM_TEX = 4
TERM_D40 = 5
TERM_H45 = 6
TERM_D42 = 7
TM_NONE = 0000
TM_SNL = 0001
TM_ANL = 0002
TM_LCF = 0004
TM_CECHO = 0010
TM_CINVIS = 0020
TM_SET = 0200
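# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated constants above).  These
# values pack an ioctl "group" letter into the high byte and a command
# number into the low byte, e.g. TCGETA == (ord('T') << 8) | 1 == 0x5401.
# The helper below is a hypothetical example of decoding such a code; the
# name _ioc_decode is an assumption for illustration and is never called.
def _ioc_decode(code):
    """Return (group_letter, command_number) for a classic two-byte code."""
    return chr((code >> 8) & 0xff), code & 0xff
# Example (uncalled): _ioc_decode(TCGETA) == ('T', 1)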
| gpl-2.0 |
goldmedal/spark | python/pyspark/sql/tests/test_group.py | 21 | 1831 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql import Row
from pyspark.testing.sqlutils import ReusedSQLTestCase
class GroupTests(ReusedSQLTestCase):
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approx_count_distinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_group import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
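# Illustrative note (not part of the test above): the dict form of
# GroupedData.agg used in test_aggregator maps a column name to the name of
# an aggregate function, e.g. (a DataFrame ``df`` is assumed):
#
#     df.groupBy().agg({'key': 'max', 'value': 'count'})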
| apache-2.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.3.0/Lib/test/list_tests.py | 106 | 17676 | """
Tests common to list and UserList.UserList
"""
import sys
import os
from functools import cmp_to_key
from test import support, seq_tests
class CommonTest(seq_tests.CommonTest):
def test_init(self):
# Iterable arg is optional
self.assertEqual(self.type2test([]), self.type2test())
# Init clears previous values
a = self.type2test([1, 2, 3])
a.__init__()
self.assertEqual(a, self.type2test([]))
# Init overwrites previous values
a = self.type2test([1, 2, 3])
a.__init__([4, 5, 6])
self.assertEqual(a, self.type2test([4, 5, 6]))
# Mutables always return a new object
b = self.type2test(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_repr(self):
l0 = []
l2 = [0, 1, 2]
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), str(l0))
self.assertEqual(repr(a0), repr(l0))
self.assertEqual(repr(a2), repr(l2))
self.assertEqual(str(a2), "[0, 1, 2]")
self.assertEqual(repr(a2), "[0, 1, 2]")
a2.append(a2)
a2.append(3)
self.assertEqual(str(a2), "[0, 1, 2, [...], 3]")
self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]")
l0 = []
for i in range(sys.getrecursionlimit() + 100):
l0 = [l0]
self.assertRaises(RuntimeError, repr, l0)
def test_print(self):
d = self.type2test(range(200))
d.append(d)
d.extend(range(200,400))
d.append(d)
d.append(400)
try:
with open(support.TESTFN, "w") as fo:
fo.write(str(d))
with open(support.TESTFN, "r") as fo:
self.assertEqual(fo.read(), repr(d))
finally:
os.remove(support.TESTFN)
def test_set_subscript(self):
a = self.type2test(range(20))
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3])
self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1)
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2])
self.assertRaises(TypeError, a.__getitem__, 'x', 1)
a[slice(2,10,3)] = [1,2,3]
self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3,
9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19]))
def test_reversed(self):
a = self.type2test(range(20))
r = reversed(a)
self.assertEqual(list(r), self.type2test(range(19, -1, -1)))
self.assertRaises(StopIteration, next, r)
self.assertEqual(list(reversed(self.type2test())),
self.type2test())
# Bug 3689: make sure list-reversed-iterator doesn't have __len__
self.assertRaises(TypeError, len, reversed([1,2,3]))
def test_setitem(self):
a = self.type2test([0, 1])
a[0] = 0
a[1] = 100
self.assertEqual(a, self.type2test([0, 100]))
a[-1] = 200
self.assertEqual(a, self.type2test([0, 200]))
a[-2] = 100
self.assertEqual(a, self.type2test([100, 200]))
self.assertRaises(IndexError, a.__setitem__, -3, 200)
self.assertRaises(IndexError, a.__setitem__, 2, 200)
a = self.type2test([])
self.assertRaises(IndexError, a.__setitem__, 0, 200)
self.assertRaises(IndexError, a.__setitem__, -1, 200)
self.assertRaises(TypeError, a.__setitem__)
a = self.type2test([0,1,2,3,4])
a[0] = 1
a[1] = 2
a[2] = 3
self.assertEqual(a, self.type2test([1,2,3,3,4]))
a[0] = 5
a[1] = 6
a[2] = 7
self.assertEqual(a, self.type2test([5,6,7,3,4]))
a[-2] = 88
a[-1] = 99
self.assertEqual(a, self.type2test([5,6,7,88,99]))
a[-2] = 8
a[-1] = 9
self.assertEqual(a, self.type2test([5,6,7,8,9]))
def test_delitem(self):
a = self.type2test([0, 1])
del a[1]
self.assertEqual(a, [0])
del a[0]
self.assertEqual(a, [])
a = self.type2test([0, 1])
del a[-2]
self.assertEqual(a, [1])
del a[-1]
self.assertEqual(a, [])
a = self.type2test([0, 1])
self.assertRaises(IndexError, a.__delitem__, -3)
self.assertRaises(IndexError, a.__delitem__, 2)
a = self.type2test([])
self.assertRaises(IndexError, a.__delitem__, 0)
self.assertRaises(TypeError, a.__delitem__)
def test_setslice(self):
l = [0, 1]
a = self.type2test(l)
for i in range(-3, 4):
a[:i] = l[:i]
self.assertEqual(a, l)
a2 = a[:]
a2[:i] = a[:i]
self.assertEqual(a2, a)
a[i:] = l[i:]
self.assertEqual(a, l)
a2 = a[:]
a2[i:] = a[i:]
self.assertEqual(a2, a)
for j in range(-3, 4):
a[i:j] = l[i:j]
self.assertEqual(a, l)
a2 = a[:]
a2[i:j] = a[i:j]
self.assertEqual(a2, a)
aa2 = a2[:]
aa2[:0] = [-2, -1]
self.assertEqual(aa2, [-2, -1, 0, 1])
aa2[0:] = []
self.assertEqual(aa2, [])
a = self.type2test([1, 2, 3, 4, 5])
a[:-1] = a
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:-1] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5]))
a = self.type2test([])
a[:] = tuple(range(10))
self.assertEqual(a, self.type2test(range(10)))
self.assertRaises(TypeError, a.__setitem__, slice(0, 1, 5))
self.assertRaises(TypeError, a.__setitem__)
def test_delslice(self):
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[:]
self.assertEqual(a, self.type2test([]))
def test_append(self):
a = self.type2test([])
a.append(0)
a.append(1)
a.append(2)
self.assertEqual(a, self.type2test([0, 1, 2]))
self.assertRaises(TypeError, a.append)
def test_extend(self):
a1 = self.type2test([0])
a2 = self.type2test((0, 1))
a = a1[:]
a.extend(a2)
self.assertEqual(a, a1 + a2)
a.extend(self.type2test([]))
self.assertEqual(a, a1 + a2)
a.extend(a)
self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1]))
a = self.type2test("spam")
a.extend("eggs")
self.assertEqual(a, list("spameggs"))
self.assertRaises(TypeError, a.extend, None)
self.assertRaises(TypeError, a.extend)
def test_insert(self):
a = self.type2test([0, 1, 2])
a.insert(0, -2)
a.insert(1, -1)
a.insert(2, 0)
self.assertEqual(a, [-2, -1, 0, 0, 1, 2])
b = a[:]
b.insert(-2, "foo")
b.insert(-200, "left")
b.insert(200, "right")
self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"]))
self.assertRaises(TypeError, a.insert)
def test_pop(self):
a = self.type2test([-1, 0, 1])
a.pop()
self.assertEqual(a, [-1, 0])
a.pop(0)
self.assertEqual(a, [0])
self.assertRaises(IndexError, a.pop, 5)
a.pop(0)
self.assertEqual(a, [])
self.assertRaises(IndexError, a.pop)
self.assertRaises(TypeError, a.pop, 42, 42)
a = self.type2test([0, 10, 20, 30, 40])
def test_remove(self):
a = self.type2test([0, 0, 1])
a.remove(1)
self.assertEqual(a, [0, 0])
a.remove(0)
self.assertEqual(a, [0])
a.remove(0)
self.assertEqual(a, [])
self.assertRaises(ValueError, a.remove, 0)
self.assertRaises(TypeError, a.remove)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.remove, BadCmp())
class BadCmp2:
def __eq__(self, other):
raise BadExc()
d = self.type2test('abcdefghcij')
d.remove('c')
self.assertEqual(d, self.type2test('abdefghcij'))
d.remove('c')
self.assertEqual(d, self.type2test('abdefghij'))
self.assertRaises(ValueError, d.remove, 'c')
self.assertEqual(d, self.type2test('abdefghij'))
# Handle comparison errors
d = self.type2test(['a', 'b', BadCmp2(), 'c'])
e = self.type2test(d)
self.assertRaises(BadExc, d.remove, 'c')
for x, y in zip(d, e):
# verify that original order and values are retained.
self.assertIs(x, y)
def test_count(self):
a = self.type2test([0, 1, 2])*3
self.assertEqual(a.count(0), 3)
self.assertEqual(a.count(1), 3)
self.assertEqual(a.count(3), 0)
self.assertRaises(TypeError, a.count)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
self.assertRaises(BadExc, a.count, BadCmp())
def test_index(self):
u = self.type2test([0, 1])
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(u.count(0), 2)
self.assertEqual(u.index(0), 2)
self.assertEqual(u.index(0, 2), 2)
self.assertEqual(u.index(-2, -10), 0)
self.assertEqual(u.index(0, 3), 3)
self.assertEqual(u.index(0, 3, 4), 3)
self.assertRaises(ValueError, u.index, 2, 0, -10)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.index, BadCmp())
a = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(a.index(0), 2)
self.assertEqual(a.index(0, 2), 2)
self.assertEqual(a.index(0, -4), 2)
self.assertEqual(a.index(-2, -10), 0)
self.assertEqual(a.index(0, 3), 3)
self.assertEqual(a.index(0, -3), 3)
self.assertEqual(a.index(0, 3, 4), 3)
self.assertEqual(a.index(0, -3, -2), 3)
self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2)
self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize)
self.assertRaises(ValueError, a.index, 2, 0, -10)
a.remove(0)
self.assertRaises(ValueError, a.index, 2, 0, 4)
self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2]))
# Test modifying the list during index's iteration
class EvilCmp:
def __init__(self, victim):
self.victim = victim
def __eq__(self, other):
del self.victim[:]
return False
a = self.type2test()
a[:] = [EvilCmp(a) for _ in range(100)]
# This used to seg fault before patch #1005778
self.assertRaises(ValueError, a.index, None)
def test_reverse(self):
u = self.type2test([-2, -1, 0, 1, 2])
u2 = u[:]
u.reverse()
self.assertEqual(u, [2, 1, 0, -1, -2])
u.reverse()
self.assertEqual(u, u2)
self.assertRaises(TypeError, u.reverse, 42)
def test_clear(self):
u = self.type2test([2, 3, 4])
u.clear()
self.assertEqual(u, [])
u = self.type2test([])
u.clear()
self.assertEqual(u, [])
u = self.type2test([])
u.append(1)
u.clear()
u.append(2)
self.assertEqual(u, [2])
self.assertRaises(TypeError, u.clear, None)
def test_copy(self):
u = self.type2test([1, 2, 3])
v = u.copy()
self.assertEqual(v, [1, 2, 3])
u = self.type2test([])
v = u.copy()
self.assertEqual(v, [])
# test that it's indeed a copy and not a reference
u = self.type2test(['a', 'b'])
v = u.copy()
v.append('i')
self.assertEqual(u, ['a', 'b'])
self.assertEqual(v, u + ['i'])
# test that it's a shallow, not a deep copy
u = self.type2test([1, 2, [3, 4], 5])
v = u.copy()
self.assertEqual(u, v)
self.assertIs(v[3], u[3])
self.assertRaises(TypeError, u.copy, None)
def test_sort(self):
u = self.type2test([1, 0])
u.sort()
self.assertEqual(u, [0, 1])
u = self.type2test([2,1,0,-1,-2])
u.sort()
self.assertEqual(u, self.type2test([-2,-1,0,1,2]))
self.assertRaises(TypeError, u.sort, 42, 42)
def revcmp(a, b):
if a == b:
return 0
elif a < b:
return 1
else: # a > b
return -1
u.sort(key=cmp_to_key(revcmp))
self.assertEqual(u, self.type2test([2,1,0,-1,-2]))
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
xmod, ymod = x%3, y%7
if xmod == ymod:
return 0
elif xmod < ymod:
return -1
else: # xmod > ymod
return 1
z = self.type2test(range(12))
z.sort(key=cmp_to_key(myComparison))
self.assertRaises(TypeError, z.sort, 2)
def selfmodifyingComparison(x,y):
z.append(1)
if x == y:
return 0
elif x < y:
return -1
else: # x > y
return 1
self.assertRaises(ValueError, z.sort,
key=cmp_to_key(selfmodifyingComparison))
self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)
def test_slice(self):
u = self.type2test("spam")
u[:2] = "h"
self.assertEqual(u, list("ham"))
def test_iadd(self):
super().test_iadd()
u = self.type2test([0, 1])
u2 = u
u += [2, 3]
self.assertIs(u, u2)
u = self.type2test("spam")
u += "eggs"
self.assertEqual(u, self.type2test("spameggs"))
self.assertRaises(TypeError, u.__iadd__, None)
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
u *= 0
self.assertEqual(u, self.type2test([]))
s = self.type2test([])
oldid = id(s)
s *= 10
self.assertEqual(id(s), oldid)
def test_extendedslicing(self):
# subscript
a = self.type2test([0,1,2,3,4])
# deletion
del a[::2]
self.assertEqual(a, self.type2test([1,3]))
a = self.type2test(range(5))
del a[1::2]
self.assertEqual(a, self.type2test([0,2,4]))
a = self.type2test(range(5))
del a[1::-2]
self.assertEqual(a, self.type2test([0,2,3,4]))
a = self.type2test(range(10))
del a[::1000]
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9]))
# assignment
a = self.type2test(range(10))
a[::2] = [-1]*5
self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]))
a = self.type2test(range(10))
a[::-4] = [10]*3
self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
a = self.type2test(range(4))
a[::-1] = a
self.assertEqual(a, self.type2test([3, 2, 1, 0]))
a = self.type2test(range(10))
b = a[:]
c = a[:]
a[2:3] = self.type2test(["two", "elements"])
b[slice(2,3)] = self.type2test(["two", "elements"])
c[2:3:] = self.type2test(["two", "elements"])
self.assertEqual(a, b)
self.assertEqual(a, c)
a = self.type2test(range(10))
a[::2] = tuple(range(5))
self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9]))
# test issue7788
a = self.type2test(range(10))
del a[9::1<<333]
def test_constructor_exception_handling(self):
# Bug #1242657
class F(object):
def __iter__(self):
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, list, F())
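# Illustrative sketch (not part of the test suite above): cmp_to_key adapts
# an old-style three-way comparison function to the key= protocol that
# list.sort expects, as exercised in test_sort.  The name _demo_revcmp is a
# hypothetical example and is never called by the tests.
def _demo_revcmp(a, b):
    # Classic cmp semantics: negative, zero or positive.
    return (b > a) - (b < a)
# Example (uncalled): sorted([1, 3, 2], key=cmp_to_key(_demo_revcmp)) == [3, 2, 1]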
| mit |
vbshah1992/microblog | flask/lib/python2.7/site-packages/markupsafe/_native.py | 1243 | 1187 | # -*- coding: utf-8 -*-
"""
markupsafe._native
~~~~~~~~~~~~~~~~~~
    Native Python implementation used when the C module is not compiled.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from markupsafe import Markup
from markupsafe._compat import text_type
def escape(s):
"""Convert the characters &, <, >, ' and " in string s to HTML-safe
sequences. Use this if you need to display text that might contain
such characters in HTML. Marks return value as markup string.
"""
if hasattr(s, '__html__'):
return s.__html__()
return Markup(text_type(s)
.replace('&', '&')
.replace('>', '>')
.replace('<', '<')
.replace("'", ''')
.replace('"', '"')
)
def escape_silent(s):
"""Like :func:`escape` but converts `None` into an empty
markup string.
"""
if s is None:
return Markup()
return escape(s)
def soft_unicode(s):
"""Make a string unicode if it isn't already. That way a markup
string is not converted back to unicode.
"""
if not isinstance(s, text_type):
s = text_type(s)
return s
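# Illustrative sketch (not part of the module API above): a minimal usage
# example of the three helpers defined in this file.  The function name
# _demo_escape is an assumption for illustration and is never called.
def _demo_escape():
    assert escape('<em>&</em>') == Markup('&lt;em&gt;&amp;&lt;/em&gt;')
    assert escape_silent(None) == Markup()
    assert soft_unicode('abc') == 'abc'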
| bsd-3-clause |
bowang/tensorflow | tensorflow/python/ops/nn_fused_batchnorm_test.py | 5 | 14553 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fused_batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test
class BatchNormalizationTest(test.TestCase):
def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError('data_format must be NCHW or NHWC, '
'got %s.' % data_format)
if data_format == 'NCHW':
x = array_ops.transpose(x, [0, 2, 3, 1])
y = nn_impl.batch_normalization(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = array_ops.transpose(y, [0, 3, 1, 2])
return y.eval()
def _test_inference(self,
x_shape,
scale_shape,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
mean = constant_op.constant(mean_val, name='mean')
var = constant_op.constant(var_val, name='variance')
epsilon = 0.001
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=mean,
variance=var,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val = sess.run(y)
y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,
data_format)
self.assertAllClose(y_ref, y_val, atol=1e-3)
def _training_ref(self, x, scale, offset, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError('data_format must be NCHW or NHWC, '
'got %s.' % data_format)
if data_format == 'NCHW':
x = array_ops.transpose(x, [0, 2, 3, 1])
mean, var = nn_impl.moments(x, [0, 1, 2], keep_dims=False)
y = nn_impl.batch_normalization(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = array_ops.transpose(y, [0, 3, 1, 2])
return y.eval(), mean.eval(), var.eval()
def _test_training(self,
x_shape,
scale_shape,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
epsilon = 0.001
y, mean, var = nn_impl.fused_batch_norm(
x,
scale,
offset,
epsilon=epsilon,
data_format=data_format,
is_training=True)
y_val, mean_val, var_val = sess.run([y, mean, var])
y_ref, mean_ref, var_ref = self._training_ref(x, scale, offset, epsilon,
data_format)
self.assertAllClose(y_ref, y_val, atol=1e-3)
self.assertAllClose(mean_ref, mean_val, atol=1e-3)
# This is for Bessel's correction. tf.nn.moments uses n, instead of n-1, as
# the denominator in the formula to calculate variance, while
# tf.nn.fused_batch_norm has Bessel's correction built in.
sample_size = x_val.size / scale_val.size
var_ref = var_ref * sample_size / (max(sample_size - 1.0, 1.0))
self.assertAllClose(var_ref, var_val, atol=1e-3)
def _test_gradient(self,
x_shape,
scale_shape,
use_gpu=True,
data_format='NHWC',
is_training=True):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
if is_training:
pop_mean = None
pop_var = None
else:
pop_mean = np.random.random_sample(scale_shape).astype(np.float32)
pop_var = np.random.random_sample(scale_shape).astype(np.float32)
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
err_scale = gradient_checker.compute_gradient_error(scale, scale_shape, y,
x_shape)
err_offset = gradient_checker.compute_gradient_error(offset, scale_shape,
y, x_shape)
err_tolerance = 1e-3
self.assertLess(err_x, err_tolerance)
self.assertLess(err_scale, err_tolerance)
self.assertLess(err_offset, err_tolerance)
def _test_grad_grad(self,
x_shape,
scale_shape,
use_gpu=True,
data_format='NHWC',
is_training=True,
err_tolerance=1e-3):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(np.float32)
grad_y_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
grad_y = constant_op.constant(grad_y_val, name='grad_y')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
if is_training:
pop_mean = None
pop_var = None
else:
pop_mean = np.random.random_sample(scale_shape).astype(np.float32)
pop_var = np.random.random_sample(scale_shape).astype(np.float32)
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
grad_x, grad_scale, grad_offset = gradients_impl.gradients(
y, [x, scale, offset], grad_y)
if is_training:
epsilon = y.op.get_attr('epsilon')
data_format = y.op.get_attr('data_format')
grad_vals = sess.run([grad_x, grad_scale, grad_offset])
        grad_internal = nn_grad._BatchNormGrad(grad_y, x, scale, pop_mean,
                                               pop_var, epsilon, data_format)
grad_internal_vals = sess.run(list(grad_internal))
for grad_val, grad_internal_val in zip(grad_vals, grad_internal_vals):
self.assertAllClose(grad_val, grad_internal_val, atol=err_tolerance)
err_grad_grad_y_1 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_x, x_shape)
err_grad_grad_y_2 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_scale, scale_shape)
err_grad_grad_y_3 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_offset, scale_shape)
# In freeze mode, grad_x is not a function of x.
if is_training:
err_grad_x_1 = gradient_checker.compute_gradient_error(
x, x_shape, grad_x, x_shape)
err_grad_x_2 = gradient_checker.compute_gradient_error(
x, x_shape, grad_scale, scale_shape)
err_grad_scale = gradient_checker.compute_gradient_error(
scale, scale_shape, grad_x, x_shape)
self.assertLess(err_grad_grad_y_1, err_tolerance)
self.assertLess(err_grad_grad_y_2, err_tolerance)
self.assertLess(err_grad_grad_y_3, err_tolerance)
if is_training:
self.assertLess(err_grad_x_1, err_tolerance)
self.assertLess(err_grad_x_2, err_tolerance)
self.assertLess(err_grad_scale, err_tolerance)
def testInference(self):
x_shape = [1, 1, 6, 1]
if test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [1], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [1], use_gpu=True, data_format='NCHW')
self._test_inference(x_shape, [1], use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
if test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [2], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [2], use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [2], use_gpu=True, data_format='NCHW')
x_shape = [27, 131, 127, 6]
if test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [131], use_gpu=True, data_format='NCHW')
self._test_inference(x_shape, [6], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [6], use_gpu=False, data_format='NHWC')
def testTraining(self):
x_shape = [1, 1, 6, 1]
if test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [1], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [1], use_gpu=True, data_format='NCHW')
self._test_training(x_shape, [1], use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
if test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [2], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [2], use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [2], use_gpu=True, data_format='NCHW')
x_shape = [27, 131, 127, 6]
if test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [131], use_gpu=True, data_format='NCHW')
self._test_training(x_shape, [6], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [6], use_gpu=False, data_format='NHWC')
def testBatchNormGrad(self):
for is_training in [True, False]:
x_shape = [1, 1, 6, 1]
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape, [1],
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape, [1],
use_gpu=True,
data_format='NCHW',
is_training=is_training)
self._test_gradient(
x_shape, [1],
use_gpu=False,
data_format='NHWC',
is_training=is_training)
x_shape = [1, 1, 6, 2]
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape, [2],
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape, [2],
use_gpu=False,
data_format='NHWC',
is_training=is_training)
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape, [2],
use_gpu=True,
data_format='NCHW',
is_training=is_training)
x_shape = [7, 9, 13, 6]
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape, [9],
use_gpu=True,
data_format='NCHW',
is_training=is_training)
self._test_gradient(
x_shape, [6],
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape, [6],
use_gpu=False,
data_format='NHWC',
is_training=is_training)
def _testBatchNormGradGrad(self, config):
shape = config['shape']
err_tolerance = config['err_tolerance']
for is_training in [True, False]:
if test.is_gpu_available(cuda_only=True):
self._test_grad_grad(
shape, [shape[3]],
use_gpu=True,
data_format='NHWC',
is_training=is_training,
err_tolerance=err_tolerance)
self._test_grad_grad(
shape, [shape[1]],
use_gpu=True,
data_format='NCHW',
is_training=is_training,
err_tolerance=err_tolerance)
self._test_grad_grad(
shape, [shape[3]],
use_gpu=False,
data_format='NHWC',
is_training=is_training,
err_tolerance=err_tolerance)
def testBatchNormGradGrad(self):
configs = [{
'shape': [2, 3, 4, 5],
'err_tolerance': 1e-2
}, {
'shape': [2, 3, 2, 2],
'err_tolerance': 1e-3
}]
for config in configs:
self._testBatchNormGradGrad(config)
if __name__ == '__main__':
test.main()
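# Illustrative sketch (not part of the test suite above): the Bessel
# correction rescaling applied in _test_training, shown with plain NumPy.
# tf.nn.moments divides by n while fused_batch_norm divides by n - 1, so the
# two variances differ by a factor of n / (n - 1).  The array below is a
# hypothetical example; the function is never called by the tests.
def _demo_bessel_correction():
  x = np.array([1.0, 2.0, 4.0])
  n = x.size
  biased = np.var(x)            # divides by n, like tf.nn.moments
  unbiased = np.var(x, ddof=1)  # divides by n - 1, like fused_batch_norm
  assert np.isclose(biased * n / (n - 1.0), unbiased)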
| apache-2.0 |
Venturi/cms | env/lib/python2.7/site-packages/cms/cms_plugins.py | 46 | 4350 | # -*- coding: utf-8 -*-
from cms.models import CMSPlugin, Placeholder
from cms.models.aliaspluginmodel import AliasPluginModel
from cms.models.placeholderpluginmodel import PlaceholderReference
from cms.plugin_base import CMSPluginBase, PluginMenuItem
from cms.plugin_pool import plugin_pool
from cms.plugin_rendering import render_placeholder
from cms.utils.urlutils import admin_reverse
from django.conf.urls import url
from django.http import HttpResponseForbidden, HttpResponseBadRequest, HttpResponse
from django.middleware.csrf import get_token
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, get_language
class PlaceholderPlugin(CMSPluginBase):
name = _("Placeholder")
    parent_classes = [0]  # so you will not be able to add it to anything
#require_parent = True
render_plugin = False
admin_preview = False
system = True
model = PlaceholderReference
plugin_pool.register_plugin(PlaceholderPlugin)
class AliasPlugin(CMSPluginBase):
name = _("Alias")
allow_children = False
model = AliasPluginModel
render_template = "cms/plugins/alias.html"
system = True
def render(self, context, instance, placeholder):
from cms.utils.plugins import downcast_plugins, build_plugin_tree
context['instance'] = instance
context['placeholder'] = placeholder
if instance.plugin_id:
plugins = instance.plugin.get_descendants().order_by('placeholder', 'path')
plugins = [instance.plugin] + list(plugins)
plugins = downcast_plugins(plugins)
plugins[0].parent_id = None
plugins = build_plugin_tree(plugins)
context['plugins'] = plugins
if instance.alias_placeholder_id:
content = render_placeholder(instance.alias_placeholder, context)
context['content'] = mark_safe(content)
return context
def get_extra_global_plugin_menu_items(self, request, plugin):
return [
PluginMenuItem(
_("Create Alias"),
admin_reverse("cms_create_alias"),
data={'plugin_id': plugin.pk, 'csrfmiddlewaretoken': get_token(request)},
)
]
def get_extra_placeholder_menu_items(self, request, placeholder):
return [
PluginMenuItem(
_("Create Alias"),
admin_reverse("cms_create_alias"),
data={'placeholder_id': placeholder.pk, 'csrfmiddlewaretoken': get_token(request)},
)
]
def get_plugin_urls(self):
return [
url(r'^create_alias/$', self.create_alias, name='cms_create_alias'),
]
def create_alias(self, request):
if not request.user.is_staff:
return HttpResponseForbidden("not enough privileges")
        if 'plugin_id' not in request.POST and 'placeholder_id' not in request.POST:
return HttpResponseBadRequest("plugin_id or placeholder_id POST parameter missing.")
plugin = None
placeholder = None
if 'plugin_id' in request.POST:
pk = request.POST['plugin_id']
try:
plugin = CMSPlugin.objects.get(pk=pk)
except CMSPlugin.DoesNotExist:
return HttpResponseBadRequest("plugin with id %s not found." % pk)
if 'placeholder_id' in request.POST:
pk = request.POST['placeholder_id']
try:
placeholder = Placeholder.objects.get(pk=pk)
except Placeholder.DoesNotExist:
return HttpResponseBadRequest("placeholder with id %s not found." % pk)
if not placeholder.has_change_permission(request):
return HttpResponseBadRequest("You do not have enough permission to alias this placeholder.")
clipboard = request.toolbar.clipboard
clipboard.cmsplugin_set.all().delete()
language = get_language()
if plugin:
language = plugin.language
alias = AliasPluginModel(language=language, placeholder=clipboard, plugin_type="AliasPlugin")
if plugin:
alias.plugin = plugin
if placeholder:
alias.alias_placeholder = placeholder
alias.save()
return HttpResponse("ok")
plugin_pool.register_plugin(AliasPlugin)
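# Illustrative sketch (not part of the plugins above): the create_alias view
# registered in AliasPlugin.get_plugin_urls expects a POST carrying a CSRF
# token plus either plugin_id or placeholder_id.  The helper below is a
# hypothetical example using Django's test client and the admin_reverse
# helper already imported above; the function is never called.
def _demo_create_alias(client, placeholder_pk, csrf_token):
    return client.post(
        admin_reverse("cms_create_alias"),
        {'placeholder_id': placeholder_pk, 'csrfmiddlewaretoken': csrf_token},
    )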
| gpl-2.0 |
thedrow/samsa | pykafka/connection.py | 2 | 4337 | from __future__ import division
"""
Author: Keith Bourgoin, Emmett Butler
"""
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["BrokerConnection"]
import logging
import socket
import struct
from .exceptions import SocketDisconnectedError
from .utils.socket import recvall_into
from .utils.compat import buffer
log = logging.getLogger(__name__)
class BrokerConnection(object):
"""
BrokerConnection thinly wraps a `socket.create_connection` call
and handles the sending and receiving of data that conform to the
kafka binary protocol over that socket.
"""
def __init__(self,
host,
port,
buffer_size=1024 * 1024,
source_host='',
source_port=0):
"""Initialize a socket connection to Kafka.
:param host: The host to which to connect
:type host: str
:param port: The port on the host to which to connect
:type port: int
:param buffer_size: The size (in bytes) of the buffer in which to
hold response data.
:type buffer_size: int
:param source_host: The host portion of the source address for
the socket connection
:type source_host: str
:param source_port: The port portion of the source address for
the socket connection
:type source_port: int
"""
self._buff = bytearray(buffer_size)
self.host = host
self.port = port
self._socket = None
self.source_host = source_host
self.source_port = source_port
def __del__(self):
"""Close this connection when the object is deleted."""
self.disconnect()
@property
def connected(self):
"""Returns true if the socket connection is open."""
return self._socket is not None
def connect(self, timeout):
"""Connect to the broker."""
log.debug("Connecting to %s:%s", self.host, self.port)
self._socket = socket.create_connection(
(self.host, self.port),
timeout / 1000,
(self.source_host, self.source_port)
)
if self._socket is not None:
log.debug("Successfully connected to %s:%s", self.host, self.port)
def disconnect(self):
"""Disconnect from the broker."""
if self._socket is None:
return
try:
self._socket.close()
except IOError:
pass
finally:
self._socket = None
def reconnect(self):
"""Disconnect from the broker, then reconnect"""
self.disconnect()
self.connect(10 * 1000)
def request(self, request):
"""Send a request over the socket connection"""
bytes_ = request.get_bytes()
if not self._socket:
raise SocketDisconnectedError
try:
self._socket.sendall(bytes_)
except SocketDisconnectedError:
self.disconnect()
raise
def response(self):
"""Wait for a response from the broker"""
size = bytes()
expected_len = 4 # Size => int32
while len(size) != expected_len:
try:
r = self._socket.recv(expected_len - len(size))
except IOError:
r = None
if r is None or len(r) == 0:
# Happens when broker has shut down
self.disconnect()
raise SocketDisconnectedError
size += r
size = struct.unpack('!i', size)[0]
try:
recvall_into(self._socket, self._buff, size)
except SocketDisconnectedError:
self.disconnect()
raise
# Drop CorrelationId => int32
return buffer(self._buff[4:4 + size])
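# Illustrative sketch (not part of the class above): request()/response()
# rely on Kafka's wire framing -- a big-endian int32 length prefix followed
# by the payload, whose first four bytes are the correlation id that
# response() drops.  The helper below is a hypothetical example and is not
# used by BrokerConnection itself.
def _demo_frame(payload):
    """Frame ``payload`` (bytes) the way a broker frames a response."""
    return struct.pack('!i', len(payload)) + payload
# Example (uncalled): struct.unpack('!i', _demo_frame(b'abcd')[:4]) == (4,)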
| apache-2.0 |
massmutual/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
ddico/odoo | addons/website/models/website_visitor.py | 1 | 13772 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
import uuid
from odoo import fields, models, api, registry, _
from odoo.addons.base.models.res_partner import _tz_get
from odoo.exceptions import UserError
from odoo.tools.misc import _format_time_ago
from odoo.http import request
from odoo.osv import expression
class WebsiteTrack(models.Model):
_name = 'website.track'
_description = 'Visited Pages'
_order = 'visit_datetime DESC'
_log_access = False
visitor_id = fields.Many2one('website.visitor', ondelete="cascade", index=True, required=True, readonly=True)
page_id = fields.Many2one('website.page', index=True, ondelete='cascade', readonly=True)
url = fields.Text('Url', index=True)
visit_datetime = fields.Datetime('Visit Date', default=fields.Datetime.now, required=True, readonly=True)
class WebsiteVisitor(models.Model):
_name = 'website.visitor'
_description = 'Website Visitor'
_order = 'last_connection_datetime DESC'
name = fields.Char('Name')
access_token = fields.Char(required=True, default=lambda x: uuid.uuid4().hex, index=True, copy=False, groups='base.group_website_publisher')
active = fields.Boolean('Active', default=True)
website_id = fields.Many2one('website', "Website", readonly=True)
partner_id = fields.Many2one('res.partner', string="Linked Partner", help="Partner of the last logged in user.")
partner_image = fields.Binary(related='partner_id.image_1920')
# localisation and info
country_id = fields.Many2one('res.country', 'Country', readonly=True)
country_flag = fields.Binary(related="country_id.image", string="Country Flag")
lang_id = fields.Many2one('res.lang', string='Language', help="Language from the website when visitor has been created")
timezone = fields.Selection(_tz_get, string='Timezone')
email = fields.Char(string='Email', compute='_compute_email_phone')
mobile = fields.Char(string='Mobile Phone', compute='_compute_email_phone')
# Visit fields
visit_count = fields.Integer('Number of visits', default=1, readonly=True, help="A new visit is considered if last connection was more than 8 hours ago.")
website_track_ids = fields.One2many('website.track', 'visitor_id', string='Visited Pages History', readonly=True)
visitor_page_count = fields.Integer('Page Views', compute="_compute_page_statistics", help="Total number of visits on tracked pages")
page_ids = fields.Many2many('website.page', string="Visited Pages", compute="_compute_page_statistics")
page_count = fields.Integer('# Visited Pages', compute="_compute_page_statistics", help="Total number of tracked page visited")
last_visited_page_id = fields.Many2one('website.page', string="Last Visited Page", compute="_compute_last_visited_page_id")
# Time fields
create_date = fields.Datetime('First connection date', readonly=True)
last_connection_datetime = fields.Datetime('Last Connection', default=fields.Datetime.now, help="Last page view date", readonly=True)
time_since_last_action = fields.Char('Last action', compute="_compute_time_statistics", help='Time since last page view. E.g.: 2 minutes ago')
is_connected = fields.Boolean('Is connected ?', compute='_compute_time_statistics', help='A visitor is considered as connected if his last page view was within the last 5 minutes.')
_sql_constraints = [
('access_token_unique', 'unique(access_token)', 'Access token should be unique.'),
('partner_uniq', 'unique(partner_id)', 'A partner is linked to only one visitor.'),
]
@api.depends('name')
def name_get(self):
return [(
record.id,
(record.name or _('Website Visitor #%s', record.id))
) for record in self]
@api.depends('partner_id.email_normalized', 'partner_id.mobile', 'partner_id.phone')
def _compute_email_phone(self):
results = self.env['res.partner'].search_read(
[('id', 'in', self.partner_id.ids)],
['id', 'email_normalized', 'mobile', 'phone'],
)
mapped_data = {
result['id']: {
'email_normalized': result['email_normalized'],
'mobile': result['mobile'] if result['mobile'] else result['phone']
} for result in results
}
for visitor in self:
visitor.email = mapped_data.get(visitor.partner_id.id, {}).get('email_normalized')
visitor.mobile = mapped_data.get(visitor.partner_id.id, {}).get('mobile')
@api.depends('website_track_ids')
def _compute_page_statistics(self):
results = self.env['website.track'].read_group(
[('visitor_id', 'in', self.ids), ('url', '!=', False)], ['visitor_id', 'page_id', 'url'], ['visitor_id', 'page_id', 'url'], lazy=False)
mapped_data = {}
for result in results:
visitor_info = mapped_data.get(result['visitor_id'][0], {'page_count': 0, 'visitor_page_count': 0, 'page_ids': set()})
visitor_info['visitor_page_count'] += result['__count']
visitor_info['page_count'] += 1
if result['page_id']:
visitor_info['page_ids'].add(result['page_id'][0])
mapped_data[result['visitor_id'][0]] = visitor_info
for visitor in self:
visitor_info = mapped_data.get(visitor.id, {'page_count': 0, 'visitor_page_count': 0, 'page_ids': set()})
visitor.page_ids = [(6, 0, visitor_info['page_ids'])]
visitor.visitor_page_count = visitor_info['visitor_page_count']
visitor.page_count = visitor_info['page_count']
@api.depends('website_track_ids.page_id')
def _compute_last_visited_page_id(self):
results = self.env['website.track'].read_group([('visitor_id', 'in', self.ids)],
['visitor_id', 'page_id', 'visit_datetime:max'],
['visitor_id', 'page_id'], lazy=False)
mapped_data = {result['visitor_id'][0]: result['page_id'][0] for result in results if result['page_id']}
for visitor in self:
visitor.last_visited_page_id = mapped_data.get(visitor.id, False)
@api.depends('last_connection_datetime')
def _compute_time_statistics(self):
for visitor in self:
visitor.time_since_last_action = _format_time_ago(self.env, (datetime.now() - visitor.last_connection_datetime))
visitor.is_connected = (datetime.now() - visitor.last_connection_datetime) < timedelta(minutes=5)
def _check_for_message_composer(self):
""" Purpose of this method is to actualize visitor model prior to contacting
him. Used notably for inheritance purpose, when dealing with leads that
could update the visitor model. """
return bool(self.partner_id and self.partner_id.email)
def _prepare_message_composer_context(self):
return {
'default_model': 'res.partner',
'default_res_id': self.partner_id.id,
'default_partner_ids': [self.partner_id.id],
}
def action_send_mail(self):
self.ensure_one()
if not self._check_for_message_composer():
raise UserError(_("There is no contact and/or no email linked this visitor."))
visitor_composer_ctx = self._prepare_message_composer_context()
compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
compose_ctx = dict(
default_use_template=False,
default_composition_mode='comment',
)
compose_ctx.update(**visitor_composer_ctx)
return {
'name': _('Contact Visitor'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': compose_ctx,
}
def _get_visitor_from_request(self, force_create=False):
""" Return the visitor as sudo from the request if there is a visitor_uuid cookie.
It is possible that the partner has changed or has disconnected.
In that case the cookie is still referencing the old visitor and need to be replaced
with the one of the visitor returned !!!. """
# This function can be called in json with mobile app.
# In case of mobile app, no uid is set on the jsonRequest env.
# In case of multi db, _env is None on request, and request.env unbound.
if not request:
return None
Visitor = self.env['website.visitor'].sudo()
visitor = Visitor
access_token = request.httprequest.cookies.get('visitor_uuid')
if access_token:
visitor = Visitor.with_context(active_test=False).search([('access_token', '=', access_token)])
# Prefetch access_token and other fields. Since access_token has a restricted group and we access
# a non restricted field (partner_id) first it is not fetched and will require an additional query to be retrieved.
visitor.access_token
if not self.env.user._is_public():
partner_id = self.env.user.partner_id
if not visitor or visitor.partner_id and visitor.partner_id != partner_id:
# Partner and no cookie or wrong cookie
visitor = Visitor.with_context(active_test=False).search([('partner_id', '=', partner_id.id)])
elif visitor and visitor.partner_id:
# Cookie associated to a Partner
visitor = Visitor
if force_create and not visitor:
visitor = self._create_visitor()
return visitor
def _handle_webpage_dispatch(self, response, website_page):
# get visitor. Done here to avoid having to do it multiple times in case of override.
visitor_sudo = self._get_visitor_from_request(force_create=True)
if request.httprequest.cookies.get('visitor_uuid', '') != visitor_sudo.access_token:
expiration_date = datetime.now() + timedelta(days=365)
response.set_cookie('visitor_uuid', visitor_sudo.access_token, expires=expiration_date)
self._handle_website_page_visit(website_page, visitor_sudo)
def _handle_website_page_visit(self, website_page, visitor_sudo):
""" Called on dispatch. This will create a website.visitor if the http request object
is a tracked website page or a tracked view. Only on tracked elements to avoid having
too much operations done on every page or other http requests.
Note: The side effect is that the last_connection_datetime is updated ONLY on tracked elements."""
url = request.httprequest.url
website_track_values = {
'url': url,
'visit_datetime': datetime.now(),
}
if website_page:
website_track_values['page_id'] = website_page.id
domain = [('page_id', '=', website_page.id)]
else:
domain = [('url', '=', url)]
visitor_sudo._add_tracking(domain, website_track_values)
if visitor_sudo.lang_id.id != request.lang.id:
visitor_sudo.write({'lang_id': request.lang.id})
def _add_tracking(self, domain, website_track_values):
""" Add the track and update the visitor"""
domain = expression.AND([domain, [('visitor_id', '=', self.id)]])
last_view = self.env['website.track'].sudo().search(domain, limit=1)
if not last_view or last_view.visit_datetime < datetime.now() - timedelta(minutes=30):
website_track_values['visitor_id'] = self.id
self.env['website.track'].create(website_track_values)
self._update_visitor_last_visit()
def _create_visitor(self):
""" Create a visitor. Tracking is added after the visitor has been created."""
country_code = request.session.get('geoip', {}).get('country_code', False)
country_id = request.env['res.country'].sudo().search([('code', '=', country_code)], limit=1).id if country_code else False
vals = {
'lang_id': request.lang.id,
'country_id': country_id,
'website_id': request.website.id,
}
if not self.env.user._is_public():
vals['partner_id'] = self.env.user.partner_id.id
vals['name'] = self.env.user.partner_id.name
return self.sudo().create(vals)
def _cron_archive_visitors(self):
one_week_ago = datetime.now() - timedelta(days=7)
visitors_to_archive = self.env['website.visitor'].sudo().search([('last_connection_datetime', '<', one_week_ago)])
visitors_to_archive.write({'active': False})
def _update_visitor_last_visit(self):
""" We need to do this part here to avoid concurrent updates error. """
try:
with self.env.cr.savepoint():
query_lock = "SELECT * FROM website_visitor where id = %s FOR NO KEY UPDATE NOWAIT"
self.env.cr.execute(query_lock, (self.id,), log_exceptions=False)
date_now = datetime.now()
query = "UPDATE website_visitor SET "
if self.last_connection_datetime < (date_now - timedelta(hours=8)):
query += "visit_count = visit_count + 1,"
query += """
active = True,
last_connection_datetime = %s
WHERE id = %s
"""
self.env.cr.execute(query, (date_now, self.id), log_exceptions=False)
except Exception:
pass
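# Illustrative sketch (not part of the model above): _update_visitor_last_visit
# locks the single visitor row with FOR NO KEY UPDATE NOWAIT so that two
# concurrent page views never block on the same row -- the loser fails fast and
# the update is simply skipped.  A minimal standalone equivalent, assuming a
# psycopg2-style cursor ``cr``; never called from this module.
def _demo_locked_touch(cr, visitor_id):
    cr.execute("SELECT id FROM website_visitor WHERE id = %s "
               "FOR NO KEY UPDATE NOWAIT", (visitor_id,))
    cr.execute("UPDATE website_visitor SET last_connection_datetime = now() "
               "WHERE id = %s", (visitor_id,))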
| agpl-3.0 |
404d/Temporals-Web | temporals_web/migration/versions/201508070005_1d84b7d16aa9_add_principal_group_system.py | 1 | 1285 | """Add principal group system
Revision ID: 1d84b7d16aa9
Revises: 179651effcbd
Create Date: 2015-08-07 00:05:42.996683
"""
# revision identifiers, used by Alembic.
revision = '1d84b7d16aa9'
down_revision = '179651effcbd'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table('authz_group',
sa.Column('id', sa.Integer(), unique=True, nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('principals', postgresql.ARRAY(sa.Text()), nullable=False),
sa.PrimaryKeyConstraint('id', 'name'),
sa.UniqueConstraint('id')
)
op.execute("ALTER TABLE authn_user ADD CONSTRAINT id_unique UNIQUE (id);")
op.create_table('authz_group_user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('group_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['group_id'], ['authz_group.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['authn_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('authz_group_user')
op.execute("ALTER TABLE authn_user DROP CONSTRAINT id_unique;")
op.drop_table('authz_group')
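# Illustrative note (not part of the migration): with a standard Alembic setup
# this revision would typically be applied and rolled back from the command
# line with, e.g.:
#
#     alembic upgrade 1d84b7d16aa9
#     alembic downgrade 179651effcbd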
| mit |
Andrei-Stepanov/avocado-vt | virttest/libvirt_xml/devices/serial.py | 22 | 2004 | """
Classes to support XML for serial devices
http://libvirt.org/formatdomain.html#elementCharSerial
"""
from virttest.libvirt_xml import base, accessors, xcepts
from virttest.libvirt_xml.devices.character import CharacterBase
class Serial(CharacterBase):
__slots__ = ('protocol_type', 'target_port', 'target_type', 'sources')
def __init__(self, type_name='pty', virsh_instance=base.virsh):
# Additional attribute for protocol type (raw, telnet, telnets, tls)
accessors.XMLAttribute('protocol_type', self, parent_xpath='/',
tag_name='protocol', attribute='type')
accessors.XMLAttribute('target_port', self, parent_xpath='/',
tag_name='target', attribute='port')
accessors.XMLAttribute('target_type', self, parent_xpath='/',
tag_name='target', attribute='type')
accessors.XMLElementList('sources', self, parent_xpath='/',
marshal_from=self.marshal_from_sources,
marshal_to=self.marshal_to_sources)
super(Serial, self).__init__(device_tag='serial', type_name=type_name,
virsh_instance=virsh_instance)
@staticmethod
def marshal_from_sources(item, index, libvirtxml):
"""
Convert a dict to serial source attributes.
"""
del index
del libvirtxml
if not isinstance(item, dict):
raise xcepts.LibvirtXMLError("Expected a dictionary of source "
"attributes, not a %s"
% str(item))
return ('source', dict(item))
@staticmethod
def marshal_to_sources(tag, attr_dict, index, libvirtxml):
"""
Convert a source tag and attributes to a dict.
"""
del index
del libvirtxml
if tag != 'source':
return None
return dict(attr_dict)
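# Illustrative sketch (not part of the class above): ``sources`` marshals a
# list of attribute dicts into <source .../> child elements, so a TCP-backed
# serial device could be described as below.  The attribute values are
# hypothetical and the function is never called; it assumes the accessors
# create the missing XML elements as they do elsewhere in virttest.
def _demo_tcp_serial():
    serial = Serial(type_name='tcp')
    serial.target_port = '0'
    serial.sources = [{'mode': 'bind', 'host': '127.0.0.1', 'service': '4555'}]
    return serial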
| gpl-2.0 |
Kast0rTr0y/ansible | lib/ansible/galaxy/api.py | 12 | 11035 | #!/usr/bin/env python
########################################################################
#
# (C) 2013, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import ansible.constants as C
from ansible.compat.six import string_types
from ansible.compat.six.moves.urllib.error import HTTPError
from ansible.compat.six.moves.urllib.parse import quote as urlquote, urlencode
from ansible.errors import AnsibleError
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import open_url
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def g_connect(method):
''' wrapper to lazily initialize connection info to galaxy '''
def wrapped(self, *args, **kwargs):
if not self.initialized:
display.vvvv("Initial connection to galaxy_server: %s" % self._api_server)
server_version = self._get_server_api_version()
if server_version not in self.SUPPORTED_VERSIONS:
raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
self.baseurl = '%s/api/%s' % (self._api_server, server_version)
self.version = server_version # for future use
display.vvvv("Base API: %s" % self.baseurl)
self.initialized = True
return method(self, *args, **kwargs)
return wrapped
class GalaxyAPI(object):
''' This class is meant to be used as an API client for an Ansible Galaxy server '''
SUPPORTED_VERSIONS = ['v1']
def __init__(self, galaxy):
self.galaxy = galaxy
self.token = GalaxyToken()
self._api_server = C.GALAXY_SERVER
self._validate_certs = not galaxy.options.ignore_certs
self.baseurl = None
self.version = None
self.initialized = False
display.debug('Validate TLS certificates: %s' % self._validate_certs)
# set the API server
if galaxy.options.api_server != C.GALAXY_SERVER:
self._api_server = galaxy.options.api_server
def __auth_header(self):
token = self.token.get()
if token is None:
raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.")
return {'Authorization': 'Token ' + token}
@g_connect
def __call_galaxy(self, url, args=None, headers=None, method=None):
if args and not headers:
headers = self.__auth_header()
try:
display.vvv(url)
resp = open_url(url, data=args, validate_certs=self._validate_certs, headers=headers, method=method,
timeout=20)
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
except HTTPError as e:
res = json.loads(to_text(e.fp.read(), errors='surrogate_or_strict'))
raise AnsibleError(res['detail'])
return data
@property
def api_server(self):
return self._api_server
@property
def validate_certs(self):
return self._validate_certs
def _get_server_api_version(self):
"""
Fetches the Galaxy API current version to ensure
the API server is up and reachable.
"""
url = '%s/api/' % self._api_server
try:
return_data = open_url(url, validate_certs=self._validate_certs)
except Exception as e:
raise AnsibleError("Failed to get data from the API server (%s): %s " % (url, to_native(e)))
try:
data = json.loads(to_text(return_data.read(), errors='surrogate_or_strict'))
except Exception as e:
raise AnsibleError("Could not process data from the API server (%s): %s " % (url, to_native(e)))
if 'current_version' not in data:
raise AnsibleError("missing required 'current_version' from server response (%s)" % url)
return data['current_version']
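# Illustrative (added): the JSON this method expects back from GET /api/.
# Only 'current_version' is required; the other field shown is an assumption:
# {"current_version": "v1", "available_versions": {"v1": "/api/v1/"}}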
@g_connect
def authenticate(self, github_token):
"""
Retrieve an authentication token
"""
url = '%s/tokens/' % self.baseurl
args = urlencode({"github_token": github_token})
resp = open_url(url, data=args, validate_certs=self._validate_certs, method="POST")
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
return data
@g_connect
def create_import_task(self, github_user, github_repo, reference=None, role_name=None):
"""
Post an import request
"""
url = '%s/imports/' % self.baseurl
args = {
"github_user": github_user,
"github_repo": github_repo,
"github_reference": reference if reference else ""
}
if role_name:
args['alternate_role_name'] = role_name
elif github_repo.startswith('ansible-role'):
args['alternate_role_name'] = github_repo[len('ansible-role')+1:]
data = self.__call_galaxy(url, args=urlencode(args))
if data.get('results', None):
return data['results']
return data
@g_connect
def get_import_task(self, task_id=None, github_user=None, github_repo=None):
"""
Check the status of an import task.
"""
url = '%s/imports/' % self.baseurl
if task_id is not None:
url = "%s?id=%d" % (url,task_id)
elif github_user is not None and github_repo is not None:
url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo)
else:
raise AnsibleError("Expected task_id or github_user and github_repo")
data = self.__call_galaxy(url)
return data['results']
@g_connect
def lookup_role_by_name(self, role_name, notify=True):
"""
Find a role by name.
"""
role_name = urlquote(role_name)
try:
parts = role_name.split(".")
user_name = ".".join(parts[0:-1])
role_name = parts[-1]
if notify:
display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
except:
raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
data = self.__call_galaxy(url)
if len(data["results"]) != 0:
return data["results"][0]
return None
@g_connect
def fetch_role_related(self, related, role_id):
"""
Fetch the list of related items for the given role.
The url comes from the 'related' field of the role.
"""
try:
url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
data = self.__call_galaxy(url)
results = data['results']
done = (data.get('next_link', None) is None)
while not done:
url = '%s%s' % (self._api_server, data['next_link'])
data = self.__call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except:
return None
@g_connect
def get_list(self, what):
"""
Fetch the list of items specified.
"""
try:
url = '%s/%s/?page_size' % (self.baseurl, what)
data = self.__call_galaxy(url)
if "results" in data:
results = data['results']
else:
results = data
done = True
if "next" in data:
done = (data.get('next_link', None) is None)
while not done:
url = '%s%s' % (self._api_server, data['next_link'])
data = self.__call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
@g_connect
def search_roles(self, search, **kwargs):
search_url = self.baseurl + '/search/roles/?'
if search:
search_url += '&autocomplete=' + urlquote(search)
tags = kwargs.get('tags',None)
platforms = kwargs.get('platforms', None)
page_size = kwargs.get('page_size', None)
author = kwargs.get('author', None)
if tags and isinstance(tags, string_types):
tags = tags.split(',')
search_url += '&tags_autocomplete=' + '+'.join(tags)
if platforms and isinstance(platforms, string_types):
platforms = platforms.split(',')
search_url += '&platforms_autocomplete=' + '+'.join(platforms)
if page_size:
search_url += '&page_size=%s' % page_size
if author:
search_url += '&username_autocomplete=%s' % author
data = self.__call_galaxy(search_url)
return data
@g_connect
def add_secret(self, source, github_user, github_repo, secret):
url = "%s/notification_secrets/" % self.baseurl
args = urlencode({
"source": source,
"github_user": github_user,
"github_repo": github_repo,
"secret": secret
})
data = self.__call_galaxy(url, args=args)
return data
@g_connect
def list_secrets(self):
url = "%s/notification_secrets" % self.baseurl
data = self.__call_galaxy(url, headers=self.__auth_header())
return data
@g_connect
def remove_secret(self, secret_id):
url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id)
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
return data
@g_connect
def delete_role(self, github_user, github_repo):
url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo)
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
return data
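# Hedged usage sketch (added; not part of the original module). GalaxyAPI is
# normally constructed by the ansible-galaxy CLI, which supplies a Galaxy
# context object with parsed options; 'galaxy' and the role name below are
# illustrative:
#
# api = GalaxyAPI(galaxy)
# role = api.lookup_role_by_name('someuser.somerole')
# if role:
#     versions = api.fetch_role_related('versions', role['id'])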
| gpl-3.0 |
Distrotech/dia | plug-ins/python/otypes.py | 10 | 5934 | # PyDia Self Documentation Series - Part II : Object Types
# Copyright (c) 2003, Hans Breuer <[email protected]>
#
# generates a new diagram which contains all the currently
# registered object types sorted by their containing package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sys, dia, string
def _log(s, append=1) :
if append :
mode = "a"
else :
mode = "w"
f = open("c:\\temp\\otypes.log", mode)
f.write(s)
def otypes_cb(data, flags) :
if data :
diagram = None # we may be running w/o GUI
else :
diagram = dia.new("Object Types.dia")
data = diagram.data
layer = data.active_layer
otypes = dia.registered_types()
keys = otypes.keys()
keys.sort()
# property keys w/o overlap
object_props = ["obj_pos", "obj_bb", "meta"]
element_props = ["elem_corner", "elem_width", "elem_height"]
orthconn_props = ["orth_points", "orth_orient", "orth_autoroute"]
shape_props = ["flip_horizontal", "flip_vertical"]
# the following are not exclusive to any one object type
line_props = ["line_width", "line_style", "line_colour"]
fill_props = ["fill_colour", "show_background"]
text_props = ["text_colour", "text_font", "text_height", "text"] # "text_alignment", "text_pos"
packages = {}
for s in keys :
kt = string.split(s, " - ")
if len(kt) == 2 :
if len(kt[0]) == 0 :
sp = "<unnamed>"
else :
sp = kt[0]
st = kt[1]
else :
sp = "<broken>"
st = kt[0]
if packages.has_key(sp) :
packages[sp].append(st)
else :
packages[sp] = [st]
dtp = dia.get_object_type("UML - LargePackage")
dtc = dia.get_object_type("UML - Class")
cy = 0
maxy = 0
maxx = 0
for sp in packages.keys() :
pkg = packages[sp]
op, h1, h2 = dtp.create(0.0, cy + 1.0)
op.properties["name"] = sp
layer.add_object(op)
cx = 0
for st in pkg :
if st == "Group" :
continue # too special to handle
oc, h3, h4 = dtc.create(cx + 1.0, cy + 4.0)
oc.properties["name"] = st
attrs = []
# we detect inheritance by common props
n_object = 0
n_element = 0
n_orthconn = 0
n_shape = 0
n_line = 0
n_fill = 0
n_text = 0
if otypes.has_key(st) :
o_real, h5, h6 = dia.get_object_type(st).create(0,0)
elif otypes.has_key(sp + " - " + st) :
o_real, h5, h6 = dia.get_object_type(sp + " - " + st).create(0,0)
else :
o_real = None
print "Failed to create object", sp, st
formal_params = []
if not o_real is None :
for p in o_real.properties.keys() :
if p in object_props : n_object = n_object + 1
elif p in orthconn_props : n_orthconn = n_orthconn + 1
elif p in element_props : n_element = n_element + 1
elif p in shape_props : n_shape = n_shape + 1
elif p in line_props : n_line = n_line + 1
elif p in text_props : n_text = n_text + 1
elif p in fill_props : n_fill = n_fill + 1
else : # don't replicate common props
attrs.append((p, o_real.properties[p].type, '', '', 0, 0, 0))
if n_line == len(line_props) :
formal_params.append(('Line', ''))
else : # need to add the incomplete set
for pp in line_props :
if o_real.properties.has_key(pp) :
attrs.append((pp, o_real.properties[pp].type, '', '', 0, 0, 0))
if n_fill == len(fill_props) :
formal_params.append(('Fill', ''))
else :
for pp in fill_props :
if o_real.properties.has_key(pp) :
attrs.append((pp, o_real.properties[pp].type, '', '', 0, 0, 0))
if n_text == len(text_props) :
formal_params.append(('Text', ''))
else :
for pp in text_props :
if o_real.properties.has_key(pp) :
attrs.append((pp, o_real.properties[pp].type, '', '', 0, 0, 0))
if n_orthconn == len(orthconn_props) :
oc.properties["stereotype"] = "OrthConn"
oc.properties["fill_colour"] = "light blue"
elif n_shape == len(shape_props) :
oc.properties["stereotype"] = "Shape"
oc.properties["fill_colour"] = "light cyan"
elif n_element == len(element_props) :
oc.properties["stereotype"] = "Element"
oc.properties["fill_colour"] = "light yellow"
elif n_object == len(object_props) :
oc.properties["stereotype"] = "Object"
else :
print "Huh?", st
oc.properties["fill_colour"] = "red"
oc.properties["attributes"] = attrs
if len(formal_params) > 0 :
oc.properties["template"] = 1
oc.properties["templates"] = formal_params
layer.add_object(oc)
# XXX: there really should be a way to safely delete an object. This one will crash:
# - when the object got added somewhere
# - any object method gets called afterwards
if not o_real is None :
o_real.destroy()
del o_real
cx = oc.bounding_box.right
if maxy < oc.bounding_box.bottom :
maxy = oc.bounding_box.bottom
if maxx < cx :
maxx = cx
# wrap the row when it gets too long
if cx > 300 :
cx = 0
cy = maxy
h = op.handles[7]
# adjust the package size to fit the objects
op.move_handle(h,(maxx + 1.0, maxy + 1.0), 0, 0)
cy = maxy + 2.0
maxx = 0 # each package starts with a fresh width
data.update_extents()
if diagram :
diagram.display()
diagram.flush()
# make it work standalone
return data
dia.register_action ("HelpOtypes", "Dia Object Types",
"/ToolboxMenu/Help/HelpExtensionStart",
otypes_cb)
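# Illustrative note (added): dia.registered_types() keys look like
# "<package> - <type>", which the split on " - " above relies on, e.g.:
# "UML - Class".split(" - ") -> ["UML", "Class"] # sp="UML", st="Class"
# "Group".split(" - ") -> ["Group"] # sp="<broken>", st="Group"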
| gpl-2.0 |
danluu/BitFunnel | NativeJIT/googletest/googletest/test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
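# Usage sketch (added): the test methods below call this helper directly, e.g.
# exit_code, output = RunWithFlag('--help') # help text printed, exit code 0
# exit_code, output = RunWithFlag(None) # no flag: the tests actually run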
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
The right message must be printed and the tests must be
skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
chrismeyersfsu/ansible | lib/ansible/modules/system/lvg.py | 23 | 9903 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Alexander Bulimov <[email protected]>
# based on lvol module by Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
author: "Alexander Bulimov (@abulimov)"
module: lvg
short_description: Configure LVM volume groups
description:
- This module creates, removes or resizes volume groups.
version_added: "1.1"
options:
vg:
description:
- The name of the volume group.
required: true
pvs:
description:
- List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing a volume group.
- The module will take care of running pvcreate if needed.
required: false
pesize:
description:
- The size of the physical extent in megabytes. Must be a power of 2.
default: 4
required: false
vg_options:
description:
- Additional options to pass to C(vgcreate) when creating the volume group.
default: null
required: false
version_added: "1.6"
state:
choices: [ "present", "absent" ]
default: present
description:
- Control if the volume group exists.
required: false
force:
choices: [ "yes", "no" ]
default: "no"
description:
- If yes, allows removal of a volume group that still contains logical volumes.
required: false
notes:
- The module does not modify the PE size of an already existing volume group.
'''
EXAMPLES = '''
# Create a volume group on top of /dev/sda1 with physical extent size = 32MB.
- lvg:
vg: vg.services
pvs: /dev/sda1
pesize: 32
# Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
# If, for example, we already have VG vg.services on top of /dev/sdb1,
# this VG will be extended by /dev/sdc5. Or if vg.services was created on
# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
# and then reduce by /dev/sda5.
- lvg:
vg: vg.services
pvs: /dev/sdb1,/dev/sdc5
# Remove a volume group with name vg.services.
- lvg:
vg: vg.services
state: absent
'''
def parse_vgs(data):
vgs = []
for line in data.splitlines():
parts = line.strip().split(';')
vgs.append({
'name': parts[0],
'pv_count': int(parts[1]),
'lv_count': int(parts[2]),
})
return vgs
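# Illustrative input/output (added; the sample line is hypothetical): one line
# of `vgs --noheadings -o vg_name,pv_count,lv_count --separator ';'` such as
# "vg.services;1;2" parses to
# [{'name': 'vg.services', 'pv_count': 1, 'lv_count': 2}]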
def find_mapper_device_name(module, dm_device):
dmsetup_cmd = module.get_bin_path('dmsetup', True)
mapper_prefix = '/dev/mapper/'
rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
if rc != 0:
module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
mapper_device = mapper_prefix + dm_name.rstrip()
return mapper_device
def parse_pvs(module, data):
pvs = []
dm_prefix = '/dev/dm-'
for line in data.splitlines():
parts = line.strip().split(';')
if parts[0].startswith(dm_prefix):
parts[0] = find_mapper_device_name(module, parts[0])
pvs.append({
'name': parts[0],
'vg_name': parts[1],
})
return pvs
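# Illustrative input/output (added; the sample line is hypothetical):
# "/dev/sdb1;vg.services" -> [{'name': '/dev/sdb1', 'vg_name': 'vg.services'}]
# A "/dev/dm-N" name is first resolved to its /dev/mapper/<name> alias via
# find_mapper_device_name() so it can be compared against user-supplied paths.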
def main():
module = AnsibleModule(
argument_spec = dict(
vg=dict(required=True),
pvs=dict(type='list'),
pesize=dict(type='int', default=4),
vg_options=dict(default=''),
state=dict(choices=["absent", "present"], default='present'),
force=dict(type='bool', default='no'),
),
supports_check_mode=True,
)
vg = module.params['vg']
state = module.params['state']
force = module.boolean(module.params['force'])
pesize = module.params['pesize']
vgoptions = module.params['vg_options'].split()
dev_list = []
if module.params['pvs']:
dev_list = module.params['pvs']
elif state == 'present':
module.fail_json(msg="No physical volumes given.")
# LVM always uses real paths not symlinks so replace symlinks with actual path
for idx, dev in enumerate(dev_list):
dev_list[idx] = os.path.realpath(dev)
if state=='present':
### check given devices
for test_dev in dev_list:
if not os.path.exists(test_dev):
module.fail_json(msg="Device %s not found."%test_dev)
### get pv list
pvs_cmd = module.get_bin_path('pvs', True)
rc,current_pvs,err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';'" % pvs_cmd)
if rc != 0:
module.fail_json(msg="Failed executing pvs command.",rc=rc, err=err)
### check pv for devices
pvs = parse_pvs(module, current_pvs)
used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg ]
if used_pvs:
module.fail_json(msg="Device %s is already in %s volume group."%(used_pvs[0]['name'],used_pvs[0]['vg_name']))
vgs_cmd = module.get_bin_path('vgs', True)
rc,current_vgs,err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
if rc != 0:
module.fail_json(msg="Failed executing vgs command.",rc=rc, err=err)
changed = False
vgs = parse_vgs(current_vgs)
for test_vg in vgs:
if test_vg['name'] == vg:
this_vg = test_vg
break
else:
this_vg = None
if this_vg is None:
if state == 'present':
### create VG
if module.check_mode:
changed = True
else:
### create PV
pvcreate_cmd = module.get_bin_path('pvcreate', True)
for current_dev in dev_list:
rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd,current_dev))
if rc == 0:
changed = True
else:
module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
vgcreate_cmd = module.get_bin_path('vgcreate', True) # require the binary, as with the other tools above
rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)
if rc == 0:
changed = True
else:
module.fail_json(msg="Creating volume group '%s' failed"%vg, rc=rc, err=err)
else:
if state == 'absent':
if module.check_mode:
module.exit_json(changed=True)
else:
if this_vg['lv_count'] == 0 or force:
### remove VG
vgremove_cmd = module.get_bin_path('vgremove', True)
rc,_,err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
if rc == 0:
module.exit_json(changed=True)
else:
module.fail_json(msg="Failed to remove volume group %s"%(vg),rc=rc, err=err)
else:
module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes"%(vg))
### resize VG
current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ]
devs_to_remove = list(set(current_devs) - set(dev_list))
devs_to_add = list(set(dev_list) - set(current_devs))
if devs_to_add or devs_to_remove:
if module.check_mode:
changed = True
else:
if devs_to_add:
devs_to_add_string = ' '.join(devs_to_add)
### create PV
pvcreate_cmd = module.get_bin_path('pvcreate', True)
for current_dev in devs_to_add:
rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd, current_dev))
if rc == 0:
changed = True
else:
module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err)
### add PV to our VG
vgextend_cmd = module.get_bin_path('vgextend', True)
rc,_,err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
if rc == 0:
changed = True
else:
module.fail_json(msg="Unable to extend %s by %s."%(vg, devs_to_add_string),rc=rc,err=err)
### remove some PV from our VG
if devs_to_remove:
devs_to_remove_string = ' '.join(devs_to_remove)
vgreduce_cmd = module.get_bin_path('vgreduce', True)
rc,_,err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
if rc == 0:
changed = True
else:
module.fail_json(msg="Unable to reduce %s by %s."%(vg, devs_to_remove_string),rc=rc,err=err)
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
Celthi/youtube-dl-GUI | youtube_dl/extractor/foxgay.py | 146 | 1735 | from __future__ import unicode_literals
from .common import InfoExtractor
class FoxgayIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?foxgay\.com/videos/(?:\S+-)?(?P<id>\d+)\.shtml'
_TEST = {
'url': 'http://foxgay.com/videos/fuck-turkish-style-2582.shtml',
'md5': '80d72beab5d04e1655a56ad37afe6841',
'info_dict': {
'id': '2582',
'ext': 'mp4',
'title': 'md5:6122f7ae0fc6b21ebdf59c5e083ce25a',
'description': 'md5:5e51dc4405f1fd315f7927daed2ce5cf',
'age_limit': 18,
'thumbnail': 're:https?://.*\.jpg$',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<title>(?P<title>.*?)</title>',
webpage, 'title', fatal=False)
description = self._html_search_regex(
r'<div class="ico_desc"><h2>(?P<description>.*?)</h2>',
webpage, 'description', fatal=False)
# Find the URL for the iFrame which contains the actual video.
iframe = self._download_webpage(
self._html_search_regex(r'iframe src="(?P<frame>.*?)"', webpage, 'video frame'),
video_id)
video_url = self._html_search_regex(
r"v_path = '(?P<vid>http://.*?)'", iframe, 'url')
thumb_url = self._html_search_regex(
r"t_path = '(?P<thumb>http://.*?)'", iframe, 'thumbnail', fatal=False)
return {
'id': video_id,
'title': title,
'url': video_url,
'description': description,
'thumbnail': thumb_url,
'age_limit': 18,
}
| mit |
myerpengine/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/Change.py | 90 | 4696 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
from lib.logreport import *
from lib.rpc import *
from ServerParameter import *
database="test"
class Change( unohelper.Base, XJobExecutor ):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
desktop=getDesktop()
log_detail(self)
self.logobj=Logger()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.protocol = {
'XML-RPC': 'http://',
'XML-RPC secure': 'https://',
'NET-RPC': 'socket://',
}
host=port=protocol=''
if docinfo.getUserFieldValue(0):
m = re.match('^(http[s]?://|socket://)([\w.\-]+):(\d{1,5})$', docinfo.getUserFieldValue(0) or '')
host = m.group(2)
port = m.group(3)
protocol = m.group(1)
if protocol:
for (key, value) in self.protocol.iteritems():
if value==protocol:
protocol=key
break
else:
protocol='XML-RPC'
self.win=DBModalDialog(60, 50, 120, 90, "Connect to Open ERP Server")
self.win.addFixedText("lblVariable", 38, 12, 25, 15, "Server ")
self.win.addEdit("txtHost",-2,9,60,15, host or 'localhost')
self.win.addFixedText("lblReportName",45 , 31, 15, 15, "Port ")
self.win.addEdit("txtPort",-2,28,60,15, port or "8069")
self.win.addFixedText("lblLoginName", 2, 51, 60, 15, "Protocol Connection")
self.win.addComboListBox("lstProtocol", -2, 48, 60, 15, True)
self.lstProtocol = self.win.getControl( "lstProtocol" )
self.win.addButton( 'btnNext', -2, -5, 30, 15, 'Next', actionListenerProc = self.btnNext_clicked )
self.win.addButton( 'btnCancel', -2 - 30 - 5 ,-5, 30, 15, 'Cancel', actionListenerProc = self.btnCancel_clicked )
for i in self.protocol.keys():
self.lstProtocol.addItem(i,self.lstProtocol.getItemCount() )
self.win.doModalDialog( "lstProtocol", protocol)
def btnNext_clicked(self, oActionEvent):
global url
aVal = ''
# aVal: placeholder value passed through to ServerParameter below
try:
url = self.protocol[self.win.getListBoxSelectedItem("lstProtocol")]+self.win.getEditText("txtHost")+":"+self.win.getEditText("txtPort")
self.sock=RPCSession(url)
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
docinfo.setUserFieldValue(0,url)
res=self.sock.listdb()
self.win.endExecute()
ServerParameter(aVal,url)
except :
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ServerParameter', LOG_ERROR, info)
ErrorDialog("Connection to server is fail. Please check your Server Parameter.", "", "Error!")
self.win.endExecute()
def btnCancel_clicked(self,oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
Change(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( Change, "org.openoffice.openerp.report.change", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
SteveHNH/ansible | lib/ansible/modules/network/vyos/vyos_banner.py | 15 | 5123 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_banner
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage multiline banners on VyOS devices
description:
- This will configure both pre-login and post-login banners on remote
devices running VyOS. It allows playbooks to add or remove
banner text from the active running configuration.
notes:
- Tested against VYOS 1.1.7
options:
banner:
description:
- Specifies which banner should be
configured on the remote device.
required: true
default: null
choices: ['pre-login', 'post-login']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires I(state=present).
default: null
state:
description:
- Specifies whether or not the configuration is present in the current
devices active running configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure the pre-login banner
vyos_banner:
banner: pre-login
text: |
this is my pre-login banner
that contains a multiline
string
state: present
- name: remove the post-login banner
vyos_banner:
banner: post-login
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- banner pre-login
- this is my pre-login banner
- that contains a multiline
- string
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vyos import get_config, load_config
from ansible.module_utils.vyos import vyos_argument_spec
def spec_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
if state == 'absent':
if have.get('state') != 'absent' or (have.get('state') != 'absent' and
'text' in have.keys() and have['text']):
commands.append('delete system login banner %s' % module.params['banner'])
elif state == 'present':
if want['text'] and want['text'].encode().decode('unicode_escape') != have.get('text'):
banner_cmd = 'set system login banner %s ' % module.params['banner']
banner_cmd += want['text'].strip()
commands.append(banner_cmd)
return commands
def config_to_dict(module):
data = get_config(module)
output = None
obj = {'banner': module.params['banner'], 'state': 'absent'}
for line in data.split('\n'):
if line.startswith('set system login banner %s' % obj['banner']):
match = re.findall(r'%s (.*)' % obj['banner'], line, re.M)
output = match
if output:
obj['text'] = output[0].encode().decode('unicode_escape')
obj['state'] = 'present'
return obj
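# Illustrative example (added; the config line is hypothetical): a line like
# set system login banner pre-login "my banner\ntext"
# yields {'banner': 'pre-login', 'state': 'present'} with obj['text'] set to
# the quoted remainder of the line, '\n' escapes decoded via 'unicode_escape'.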
def map_params_to_obj(module):
text = module.params['text']
if text:
text = "%r" % (str(text).strip())
return {
'banner': module.params['banner'],
'text': text,
'state': module.params['state']
}
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=['pre-login', 'post-login']),
text=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(vyos_argument_spec)
required_if = [('state', 'present', ('text',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = config_to_dict(module)
commands = spec_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jasmas/DiscoNet | DiscoNet/_platform_detect.py | 1 | 1264 | import sys, os
from subprocess import call as subcall
class Detect():
doc_path = ''
icon = ''
def open_method(filename):
pass
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, relative_path)
if sys.platform.startswith('win'):
import winshell
def win_open(filename):
os.startfile(filename)
Detect.open_method = win_open
Detect.icon = resource_path('disco.ico')
Detect.doc_path = winshell.my_documents()
else:
Detect.doc_path = os.path.join(os.path.expanduser('~'), 'Documents')
if sys.platform.startswith('darwin'):
def darwin_open(filename):
subcall(('open', filename))
Detect.open_method = darwin_open
Detect.icon = resource_path('disco-1024.png')
else:
Detect.icon = resource_path('disco-256.png')
if os.name == 'posix':
def posix_open(filename):
subcall(('xdg-open', filename))
Detect.open_method = posix_open
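# Hedged usage sketch (added; 'scan.xlsx' is an illustrative filename):
# callers use the resolved callback and path without platform checks, e.g.
# workbook = os.path.join(Detect.doc_path, 'scan.xlsx')
# Detect.open_method(workbook) # os.startfile / open / xdg-open as appropriate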
| mit |
samuelhavron/heroku-buildpack-python | Python-3.4.3/Lib/test/test_dictcomps.py | 121 | 3782 | import unittest
from test import support
# For scope testing.
g = "Global variable"
class DictComprehensionTest(unittest.TestCase):
def test_basics(self):
expected = {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17,
8: 18, 9: 19}
actual = {k: k + 10 for k in range(10)}
self.assertEqual(actual, expected)
expected = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
actual = {k: v for k in range(10) for v in range(10) if k == v}
self.assertEqual(actual, expected)
def test_scope_isolation(self):
k = "Local Variable"
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {k: None for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(k, "Local Variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {k: v for v in range(10) for k in range(v * 9, v * 10)}
self.assertEqual(k, "Local Variable")
self.assertEqual(actual, expected)
def test_scope_isolation_from_global(self):
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {g: None for g in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(g, "Global variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {g: v for v in range(10) for g in range(v * 9, v * 10)}
self.assertEqual(g, "Global variable")
self.assertEqual(actual, expected)
def test_global_visibility(self):
expected = {0: 'Global variable', 1: 'Global variable',
2: 'Global variable', 3: 'Global variable',
4: 'Global variable', 5: 'Global variable',
6: 'Global variable', 7: 'Global variable',
8: 'Global variable', 9: 'Global variable'}
actual = {k: g for k in range(10)}
self.assertEqual(actual, expected)
def test_local_visibility(self):
v = "Local variable"
expected = {0: 'Local variable', 1: 'Local variable',
2: 'Local variable', 3: 'Local variable',
4: 'Local variable', 5: 'Local variable',
6: 'Local variable', 7: 'Local variable',
8: 'Local variable', 9: 'Local variable'}
actual = {k: v for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(v, "Local variable")
def test_illegal_assignment(self):
with self.assertRaisesRegex(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} = 5", "<test>",
"exec")
with self.assertRaisesRegex(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} += 5", "<test>",
"exec")
if __name__ == "__main__":
unittest.main()
| mit |
marcoitur/Freecad_test | src/Mod/Ship/shipUtils/Units.py | 17 | 3605 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD
import Units
# Unit labels for each FreeCAD user schema (indexed by UserSchema)
LENGTH_UNITS = ('mm', 'm', 'in', 'in')
MASS_UNITS = ('kg', 'kg', 'lb', 'lb')
TIME_UNITS = ('s', 's', 's', 's')
ANGLE_UNITS = ('deg', 'deg', 'deg', 'deg')
def getLengthUnits():
param = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Units")
units_id = param.GetInt('UserSchema', 0)
return LENGTH_UNITS[units_id]
def getLengthFormat():
param = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Units")
decimals = param.GetInt("Decimals", 2)
units_id = param.GetInt('UserSchema', 0)
return '{0:.' + str(decimals) + 'f} ' + LENGTH_UNITS[units_id]
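# Illustrative (added), assuming the default schema 0 and 2 decimals:
# getLengthFormat() returns '{0:.2f} mm', so
# getLengthFormat().format(3.14159) -> '3.14 mm'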
def getMassUnits():
param = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Units")
units_id = param.GetInt('UserSchema', 0)
return MASS_UNITS[units_id]
def getMassFormat():
param = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Units")
decimals = param.GetInt("Decimals", 2)
units_id = param.GetInt('UserSchema', 0)
return '{0:.' + str(decimals) + 'f} ' + MASS_UNITS[units_id]
def getTimeUnits():
param = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Units")
units_id = param.GetInt('UserSchema', 0)
return TIME_UNITS[units_id]
def getTimeFormat():
param = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Units")
decimals = param.GetInt("Decimals", 2)
units_id = param.GetInt('UserSchema', 0)
return '{0:.' + str(decimals) + 'f} ' + TIME_UNITS[units_id]
def getAngleUnits():
param = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Units")
units_id = param.GetInt('UserSchema', 0)
return ANGLE_UNITS[units_id]
def getAngleFormat():
param = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Units")
decimals = param.GetInt("Decimals", 2)
units_id = param.GetInt('UserSchema', 0)
return '{0:.' + str(decimals) + 'f} ' + ANGLE_UNITS[units_id]
| lgpl-2.1 |
ruuk/script.module.youtube.dl | lib/youtube_dl/extractor/twentyfourvideo.py | 11 | 4756 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
int_or_none,
xpath_attr,
xpath_element,
)
class TwentyFourVideoIE(InfoExtractor):
IE_NAME = '24video'
_VALID_URL = r'''(?x)
https?://
(?P<host>
(?:(?:www|porno?)\.)?24video\.
(?:net|me|xxx|sexy?|tube|adult|site|vip)
)/
(?:
video/(?:(?:view|xml)/)?|
player/new24_play\.swf\?id=
)
(?P<id>\d+)
'''
_TESTS = [{
'url': 'http://www.24video.net/video/view/1044982',
'md5': 'e09fc0901d9eaeedac872f154931deeb',
'info_dict': {
'id': '1044982',
'ext': 'mp4',
'title': 'Эротика каменного века',
'description': 'Как смотрели порно в каменном веке.',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'SUPERTELO',
'duration': 31,
'timestamp': 1275937857,
'upload_date': '20100607',
'age_limit': 18,
'like_count': int,
'dislike_count': int,
},
}, {
'url': 'http://www.24video.net/player/new24_play.swf?id=1044982',
'only_matching': True,
}, {
'url': 'http://www.24video.me/video/view/1044982',
'only_matching': True,
}, {
'url': 'http://www.24video.tube/video/view/2363750',
'only_matching': True,
}, {
'url': 'https://www.24video.site/video/view/2640421',
'only_matching': True,
}, {
'url': 'https://porno.24video.net/video/2640421-vsya-takaya-gibkaya-i-v-masle',
'only_matching': True,
}, {
'url': 'https://www.24video.vip/video/view/1044982',
'only_matching': True,
}, {
'url': 'https://porn.24video.net/video/2640421-vsya-takay',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
host = mobj.group('host')
webpage = self._download_webpage(
'http://%s/video/view/%s' % (host, video_id), video_id)
title = self._og_search_title(webpage)
description = self._html_search_regex(
r'<(p|span)[^>]+itemprop="description"[^>]*>(?P<description>[^<]+)</\1>',
webpage, 'description', fatal=False, group='description')
thumbnail = self._og_search_thumbnail(webpage)
duration = int_or_none(self._og_search_property(
'duration', webpage, 'duration', fatal=False))
timestamp = parse_iso8601(self._search_regex(
r'<time[^>]+\bdatetime="([^"]+)"[^>]+itemprop="uploadDate"',
webpage, 'upload date', fatal=False))
uploader = self._html_search_regex(
r'class="video-uploaded"[^>]*>\s*<a href="/jsecUser/movies/[^"]+"[^>]*>([^<]+)</a>',
webpage, 'uploader', fatal=False)
view_count = int_or_none(self._html_search_regex(
r'<span class="video-views">(\d+) просмотр',
webpage, 'view count', fatal=False))
comment_count = int_or_none(self._html_search_regex(
r'<a[^>]+href="#tab-comments"[^>]*>(\d+) комментари',
webpage, 'comment count', default=None))
# Sets some cookies
self._download_xml(
r'http://%s/video/xml/%s?mode=init' % (host, video_id),
video_id, 'Downloading init XML')
video_xml = self._download_xml(
'http://%s/video/xml/%s?mode=play' % (host, video_id),
video_id, 'Downloading video XML')
video = xpath_element(video_xml, './/video', 'video', fatal=True)
formats = [{
'url': xpath_attr(video, '', 'url', 'video URL', fatal=True),
}]
like_count = int_or_none(video.get('ratingPlus'))
dislike_count = int_or_none(video.get('ratingMinus'))
age_limit = 18 if video.get('adult') == 'true' else 0
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'duration': duration,
'timestamp': timestamp,
'view_count': view_count,
'comment_count': comment_count,
'like_count': like_count,
'dislike_count': dislike_count,
'age_limit': age_limit,
'formats': formats,
}
| gpl-2.0 |
joshua-cogliati-inl/moose | framework/contrib/nsiqcppstyle/nsiqcppstyle_state.py | 43 | 3572 | # Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class _NsiqCppStyleState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.error_count = 0 # global count of reported errors
# rule checkers to run during analysis
self.checkers = []
self.errorPerChecker = {}
self.errorPerFile = {}
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'vs7'
self.verbose = False
self.showUrl = False
self.reportError = False
self.suppressRules = {}
self.varMap = {}
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
# __init__ only defines self.verbose, so guard against verbose_level
# being unset on the first call.
last_verbose_level = getattr(self, 'verbose_level', 0)
self.verbose_level = level
return last_verbose_level
def SetCheckers(self, checkers):
self.checkers = checkers
def ResetErrorCount(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errorPerChecker = {}
self.errorPerFile = {}
def IncrementErrorCount(self, category, file):
"""Bumps the module's error statistic."""
self.error_count += 1
self.errorPerChecker[category] = self.errorPerChecker.get(category, 0) + 1
errorsPerFile = self.errorPerFile.get(file, {})
errorsPerFile[category] = errorsPerFile.get(category, 0) + 1
self.errorPerFile[file] = errorsPerFile
def SuppressRule(self, ruleName):
self.suppressRules[ruleName] = True
def ResetRuleSuppression(self):
self.suppressRules = {}
def CheckRuleSuppression(self, ruleName):
return self.suppressRules.get(ruleName, False)
def GetVar(self, key, defaultValue):
return self.varMap.get(key, defaultValue)
_nsiqcppstyle_state = _NsiqCppStyleState()
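# Hedged usage sketch (added; the rule name is illustrative): rule checkers
# are expected to bump this shared state on each violation, e.g.
# _nsiqcppstyle_state.IncrementErrorCount('RULE_4_1_A_B', 'src/foo.cpp')
# # -> error_count == 1, errorPerFile == {'src/foo.cpp': {'RULE_4_1_A_B': 1}}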
| lgpl-2.1 |
dmitriy0611/django | tests/migrations/test_operations.py | 64 | 93154 | from __future__ import unicode_literals
import unittest
from django.db import connection, migrations, models, transaction
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.db.models.fields import NOT_PROVIDED
from django.db.transaction import atomic
from django.db.utils import IntegrityError
from django.test import override_settings
from django.utils import six
from .models import FoodManager, FoodQuerySet
from .test_base import MigrationTestBase
try:
import sqlparse
except ImportError:
sqlparse = None
class OperationTestBase(MigrationTestBase):
"""
Common functions to help test operations.
"""
def apply_operations(self, app_label, project_state, operations):
migration = Migration('name', app_label)
migration.operations = operations
with connection.schema_editor() as editor:
return migration.apply(project_state, editor)
def unapply_operations(self, app_label, project_state, operations):
migration = Migration('name', app_label)
migration.operations = operations
with connection.schema_editor() as editor:
return migration.unapply(project_state, editor)
def make_test_state(self, app_label, operation, **kwargs):
"""
Makes a test state using set_up_test_model and returns the
original state and the state after the migration is applied.
"""
project_state = self.set_up_test_model(app_label, **kwargs)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
return project_state, new_state
def set_up_test_model(self, app_label, second_model=False, third_model=False,
related_model=False, mti_model=False, proxy_model=False, manager_model=False,
unique_together=False, options=False, db_table=None, index_together=False):
"""
Creates a test model state and database table.
"""
# Delete the tables if they already exist
table_names = [
# Start with ManyToMany tables
'_pony_stables', '_pony_vans',
# Then standard model tables
'_pony', '_stable', '_van',
]
tables = [(app_label + table_name) for table_name in table_names]
with connection.cursor() as cursor:
table_names = connection.introspection.table_names(cursor)
connection.disable_constraint_checking()
sql_delete_table = connection.schema_editor().sql_delete_table
with transaction.atomic():
for table in tables:
if table in table_names:
cursor.execute(sql_delete_table % {
"table": connection.ops.quote_name(table),
})
connection.enable_constraint_checking()
# Make the "current" state
model_options = {
"swappable": "TEST_SWAP_MODEL",
"index_together": [["weight", "pink"]] if index_together else [],
"unique_together": [["pink", "weight"]] if unique_together else [],
}
if options:
model_options["permissions"] = [("can_groom", "Can groom")]
if db_table:
model_options["db_table"] = db_table
operations = [migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
("weight", models.FloatField()),
],
options=model_options,
)]
if second_model:
operations.append(migrations.CreateModel(
"Stable",
[
("id", models.AutoField(primary_key=True)),
]
))
if third_model:
operations.append(migrations.CreateModel(
"Van",
[
("id", models.AutoField(primary_key=True)),
]
))
if related_model:
operations.append(migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony")),
("friend", models.ForeignKey("self"))
],
))
if mti_model:
operations.append(migrations.CreateModel(
"ShetlandPony",
fields=[
('pony_ptr', models.OneToOneField(
auto_created=True,
primary_key=True,
to_field='id',
serialize=False,
to='Pony',
)),
("cuteness", models.IntegerField(default=1)),
],
bases=['%s.Pony' % app_label],
))
if proxy_model:
operations.append(migrations.CreateModel(
"ProxyPony",
fields=[],
options={"proxy": True},
bases=['%s.Pony' % app_label],
))
if manager_model:
operations.append(migrations.CreateModel(
"Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
]
))
return self.apply_operations(app_label, ProjectState(), operations)
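# A hedged illustration (added) of how the helpers above combine in the tests
# that follow; "test_xxxx" is a placeholder app label, real tests use a unique
# label per test:
#
# project_state = self.set_up_test_model("test_xxxx", second_model=True)
# new_state = project_state.clone()
# operation.state_forwards("test_xxxx", new_state)
# with connection.schema_editor() as editor:
#     operation.database_forwards("test_xxxx", editor, project_state, new_state)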
class OperationTests(OperationTestBase):
"""
Tests running the operations and making sure they do what they say they do.
Each test checks the state change and then the database operation,
both forwards and backwards.
"""
def test_create_model(self):
"""
Tests the CreateModel operation.
Most other tests use this operation as part of setup, so check failures here first.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
self.assertEqual(operation.describe(), "Create model Pony")
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2].keys()), ["fields", "name"])
# And default manager not in set
operation = migrations.CreateModel("Foo", fields=[], managers=[("objects", models.Manager())])
definition = operation.deconstruct()
self.assertNotIn('managers', definition[2])
def test_create_model_with_unique_after(self):
"""
Tests the CreateModel operation directly followed by an
AlterUniqueTogether (bug #22844 - sqlite remake issues)
"""
operation1 = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
operation2 = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("number", models.IntegerField(default=1)),
("pony", models.ForeignKey("test_crmoua.Pony")),
],
)
operation3 = migrations.AlterUniqueTogether(
"Rider",
[
("number", "pony"),
],
)
# Test the database alteration
project_state = ProjectState()
self.assertTableNotExists("test_crmoua_pony")
self.assertTableNotExists("test_crmoua_rider")
with connection.schema_editor() as editor:
new_state = project_state.clone()
operation1.state_forwards("test_crmoua", new_state)
operation1.database_forwards("test_crmoua", editor, project_state, new_state)
project_state, new_state = new_state, new_state.clone()
operation2.state_forwards("test_crmoua", new_state)
operation2.database_forwards("test_crmoua", editor, project_state, new_state)
project_state, new_state = new_state, new_state.clone()
operation3.state_forwards("test_crmoua", new_state)
operation3.database_forwards("test_crmoua", editor, project_state, new_state)
self.assertTableExists("test_crmoua_pony")
self.assertTableExists("test_crmoua_rider")
def test_create_model_m2m(self):
"""
Test the creation of a model with a ManyToMany field and the
auto-created "through" model.
"""
project_state = self.set_up_test_model("test_crmomm")
operation = migrations.CreateModel(
"Stable",
[
("id", models.AutoField(primary_key=True)),
("ponies", models.ManyToManyField("Pony", related_name="stables"))
]
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_crmomm", new_state)
# Test the database alteration
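        # The M2M data lives in an auto-created join table, not in a column on Stable.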
self.assertTableNotExists("test_crmomm_stable_ponies")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmomm", editor, project_state, new_state)
self.assertTableExists("test_crmomm_stable")
self.assertTableExists("test_crmomm_stable_ponies")
self.assertColumnNotExists("test_crmomm_stable", "ponies")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_crmomm", "Pony")
Stable = new_state.apps.get_model("test_crmomm", "Stable")
stable = Stable.objects.create()
p1 = Pony.objects.create(pink=False, weight=4.55)
p2 = Pony.objects.create(pink=True, weight=5.43)
stable.ponies.add(p1, p2)
self.assertEqual(stable.ponies.count(), 2)
stable.ponies.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmomm", editor, new_state, project_state)
self.assertTableNotExists("test_crmomm_stable")
self.assertTableNotExists("test_crmomm_stable_ponies")
def test_create_model_inheritance(self):
"""
Tests the CreateModel operation on a multi-table inheritance setup.
"""
project_state = self.set_up_test_model("test_crmoih")
# Test the state alteration
operation = migrations.CreateModel(
"ShetlandPony",
[
('pony_ptr', models.OneToOneField(
auto_created=True,
primary_key=True,
to_field='id',
serialize=False,
to='test_crmoih.Pony',
)),
("cuteness", models.IntegerField(default=1)),
],
)
new_state = project_state.clone()
operation.state_forwards("test_crmoih", new_state)
self.assertIn(("test_crmoih", "shetlandpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crmoih_shetlandpony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmoih", editor, project_state, new_state)
self.assertTableExists("test_crmoih_shetlandpony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmoih", editor, new_state, project_state)
self.assertTableNotExists("test_crmoih_shetlandpony")
def test_create_proxy_model(self):
"""
Tests that CreateModel ignores proxy models.
"""
project_state = self.set_up_test_model("test_crprmo")
# Test the state alteration
operation = migrations.CreateModel(
"ProxyPony",
[],
options={"proxy": True},
bases=("test_crprmo.Pony", ),
)
self.assertEqual(operation.describe(), "Create proxy model ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_crprmo", new_state)
self.assertIn(("test_crprmo", "proxypony"), new_state.models)
# Test the database alteration
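        # Proxy models share the concrete parent's table, so no new table should be created.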
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crprmo", editor, project_state, new_state)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crprmo", editor, new_state, project_state)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2].keys()), ["bases", "fields", "name", "options"])
def test_create_unmanaged_model(self):
"""
Tests that CreateModel ignores unmanaged models.
"""
project_state = self.set_up_test_model("test_crummo")
# Test the state alteration
operation = migrations.CreateModel(
"UnmanagedPony",
[],
options={"proxy": True},
bases=("test_crummo.Pony", ),
)
self.assertEqual(operation.describe(), "Create proxy model UnmanagedPony")
new_state = project_state.clone()
operation.state_forwards("test_crummo", new_state)
self.assertIn(("test_crummo", "unmanagedpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crummo", editor, project_state, new_state)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crummo", editor, new_state, project_state)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
def test_create_model_managers(self):
"""
Tests that the managers on a model are set.
"""
project_state = self.set_up_test_model("test_cmoma")
# Test the state alteration
operation = migrations.CreateModel(
"Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
]
)
self.assertEqual(operation.describe(), "Create model Food")
new_state = project_state.clone()
operation.state_forwards("test_cmoma", new_state)
self.assertIn(("test_cmoma", "food"), new_state.models)
managers = new_state.models["test_cmoma", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
def test_delete_model(self):
"""
Tests the DeleteModel operation.
"""
project_state = self.set_up_test_model("test_dlmo")
# Test the state alteration
operation = migrations.DeleteModel("Pony")
self.assertEqual(operation.describe(), "Delete model Pony")
new_state = project_state.clone()
operation.state_forwards("test_dlmo", new_state)
self.assertNotIn(("test_dlmo", "pony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlmo", editor, project_state, new_state)
self.assertTableNotExists("test_dlmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dlmo", editor, new_state, project_state)
self.assertTableExists("test_dlmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "DeleteModel")
self.assertEqual(definition[1], [])
self.assertEqual(list(definition[2]), ["name"])
def test_delete_proxy_model(self):
"""
Tests the DeleteModel operation ignores proxy models.
"""
project_state = self.set_up_test_model("test_dlprmo", proxy_model=True)
# Test the state alteration
operation = migrations.DeleteModel("ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_dlprmo", new_state)
self.assertIn(("test_dlprmo", "proxypony"), project_state.models)
self.assertNotIn(("test_dlprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlprmo", editor, project_state, new_state)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dlprmo", editor, new_state, project_state)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
def test_rename_model(self):
"""
Tests the RenameModel operation.
"""
project_state = self.set_up_test_model("test_rnmo", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Pony", "Horse")
self.assertEqual(operation.describe(), "Rename model Pony to Horse")
# Test initial state and database
self.assertIn(("test_rnmo", "pony"), project_state.models)
self.assertNotIn(("test_rnmo", "horse"), project_state.models)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id"))
self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id"))
# Migrate forwards
new_state = project_state.clone()
new_state = self.apply_operations("test_rnmo", new_state, [operation])
# Test new state and database
self.assertNotIn(("test_rnmo", "pony"), new_state.models)
self.assertIn(("test_rnmo", "horse"), new_state.models)
# RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual("test_rnmo.Horse", new_state.models["test_rnmo", "rider"].fields[1][1].remote_field.model)
self.assertTableNotExists("test_rnmo_pony")
self.assertTableExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id"))
self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id"))
# Migrate backwards
original_state = self.unapply_operations("test_rnmo", project_state, [operation])
# Test original state and database
self.assertIn(("test_rnmo", "pony"), original_state.models)
self.assertNotIn(("test_rnmo", "horse"), original_state.models)
self.assertEqual("Pony", original_state.models["test_rnmo", "rider"].fields[1][1].remote_field.model)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id"))
self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id"))
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameModel")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'old_name': "Pony", 'new_name': "Horse"})
def test_rename_model_with_self_referential_fk(self):
"""
Tests the RenameModel operation on model with self referential FK.
"""
project_state = self.set_up_test_model("test_rmwsrf", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Rider", "HorseRider")
self.assertEqual(operation.describe(), "Rename model Rider to HorseRider")
new_state = project_state.clone()
operation.state_forwards("test_rmwsrf", new_state)
self.assertNotIn(("test_rmwsrf", "rider"), new_state.models)
self.assertIn(("test_rmwsrf", "horserider"), new_state.models)
# Remember, RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual("test_rmwsrf.HorseRider", new_state.models["test_rmwsrf", "horserider"].fields[2][1].remote_field.model)
# Test the database alteration
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id"))
self.assertFKNotExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id"))
with connection.schema_editor() as editor:
operation.database_forwards("test_rmwsrf", editor, project_state, new_state)
self.assertTableNotExists("test_rmwsrf_rider")
self.assertTableExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKNotExists("test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_rider", "id"))
self.assertFKExists("test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_horserider", "id"))
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmwsrf", editor, new_state, project_state)
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id"))
self.assertFKNotExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id"))
def test_rename_model_with_superclass_fk(self):
"""
Tests the RenameModel operation on a model which has a superclass that
has a foreign key.
"""
project_state = self.set_up_test_model("test_rmwsc", related_model=True, mti_model=True)
# Test the state alteration
operation = migrations.RenameModel("ShetlandPony", "LittleHorse")
self.assertEqual(operation.describe(), "Rename model ShetlandPony to LittleHorse")
new_state = project_state.clone()
operation.state_forwards("test_rmwsc", new_state)
self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models)
self.assertIn(("test_rmwsc", "littlehorse"), new_state.models)
# RenameModel shouldn't repoint the superclass's relations, only local ones
self.assertEqual(
project_state.models["test_rmwsc", "rider"].fields[1][1].remote_field.model,
new_state.models["test_rmwsc", "rider"].fields[1][1].remote_field.model
)
# Before running the migration we have a table for Shetland Pony, not Little Horse
self.assertTableExists("test_rmwsc_shetlandpony")
self.assertTableNotExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
# and the foreign key on rider points to pony, not shetland pony
self.assertFKExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id"))
self.assertFKNotExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_shetlandpony", "id"))
with connection.schema_editor() as editor:
operation.database_forwards("test_rmwsc", editor, project_state, new_state)
# Now we have a little horse table, not shetland pony
self.assertTableNotExists("test_rmwsc_shetlandpony")
self.assertTableExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
# but the Foreign keys still point at pony, not little horse
self.assertFKExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id"))
self.assertFKNotExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_littlehorse", "id"))
def test_rename_model_with_self_referential_m2m(self):
app_label = "test_rename_model_with_self_referential_m2m"
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel("ReflexivePony", fields=[
("ponies", models.ManyToManyField("self")),
]),
])
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameModel("ReflexivePony", "ReflexivePony2"),
])
Pony = project_state.apps.get_model(app_label, "ReflexivePony2")
pony = Pony.objects.create()
pony.ponies.add(pony)
def test_rename_model_with_m2m(self):
app_label = "test_rename_model_with_m2m"
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel("Rider", fields=[]),
migrations.CreateModel("Pony", fields=[
("riders", models.ManyToManyField("Rider")),
]),
])
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameModel("Pony", "Pony2"),
])
Pony = project_state.apps.get_model(app_label, "Pony2")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(Pony._meta.get_field('riders').remote_field.through.objects.count(), 2)
def test_rename_m2m_target_model(self):
app_label = "test_rename_m2m_target_model"
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel("Rider", fields=[]),
migrations.CreateModel("Pony", fields=[
("riders", models.ManyToManyField("Rider")),
]),
])
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameModel("Rider", "Rider2"),
])
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider2")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(Pony._meta.get_field('riders').remote_field.through.objects.count(), 2)
def test_add_field(self):
"""
Tests the AddField operation.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
self.assertEqual(operation.describe(), "Add field height to Pony")
project_state, new_state = self.make_test_state("test_adfl", operation)
self.assertEqual(len(new_state.models["test_adfl", "pony"].fields), 4)
field = [
f for n, f in new_state.models["test_adfl", "pony"].fields
if n == "height"
][0]
self.assertEqual(field.default, 5)
# Test the database alteration
self.assertColumnNotExists("test_adfl_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adfl", editor, project_state, new_state)
self.assertColumnExists("test_adfl_pony", "height")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adfl", editor, new_state, project_state)
self.assertColumnNotExists("test_adfl_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_add_charfield(self):
"""
        Tests the AddField operation on CharField.
"""
project_state = self.set_up_test_model("test_adchfl")
Pony = project_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations("test_adchfl", project_state, [
migrations.AddField(
"Pony",
"text",
models.CharField(max_length=10, default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.CharField(max_length=10, default=""),
),
            # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.CharField(max_length=10, default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.CharField(max_length=10, default='"\'"'),
),
])
Pony = new_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')
def test_add_textfield(self):
"""
Tests the AddField operation on TextField.
"""
project_state = self.set_up_test_model("test_adtxtfl")
Pony = project_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations("test_adtxtfl", project_state, [
migrations.AddField(
"Pony",
"text",
models.TextField(default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.TextField(default=""),
),
            # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.TextField(default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.TextField(default='"\'"'),
),
])
Pony = new_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')
def test_add_binaryfield(self):
"""
        Tests the AddField operation on BinaryField.
"""
project_state = self.set_up_test_model("test_adbinfl")
Pony = project_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations("test_adbinfl", project_state, [
migrations.AddField(
"Pony",
"blob",
models.BinaryField(default=b"some text"),
),
migrations.AddField(
"Pony",
"empty",
models.BinaryField(default=b""),
),
            # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.BinaryField(default=b"42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.BinaryField(default=b'"\'"'),
),
])
Pony = new_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
# SQLite returns buffer/memoryview, cast to bytes for checking.
self.assertEqual(bytes(pony.blob), b"some text")
self.assertEqual(bytes(pony.empty), b"")
self.assertEqual(bytes(pony.digits), b"42")
self.assertEqual(bytes(pony.quotes), b'"\'"')
def test_column_name_quoting(self):
"""
Column names that are SQL keywords shouldn't cause problems when used
in migrations (#22168).
"""
project_state = self.set_up_test_model("test_regr22168")
operation = migrations.AddField(
"Pony",
"order",
models.IntegerField(default=0),
)
new_state = project_state.clone()
operation.state_forwards("test_regr22168", new_state)
with connection.schema_editor() as editor:
operation.database_forwards("test_regr22168", editor, project_state, new_state)
self.assertColumnExists("test_regr22168_pony", "order")
def test_add_field_preserve_default(self):
"""
Tests the AddField operation's state alteration
when preserve_default = False.
"""
project_state = self.set_up_test_model("test_adflpd")
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=4),
preserve_default=False,
)
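        # With preserve_default=False the default is used only to fill existing
        # rows during the ALTER; it is then dropped from the field state, hence
        # the NOT_PROVIDED check below.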
new_state = project_state.clone()
operation.state_forwards("test_adflpd", new_state)
self.assertEqual(len(new_state.models["test_adflpd", "pony"].fields), 4)
field = [
f for n, f in new_state.models["test_adflpd", "pony"].fields
if n == "height"
][0]
self.assertEqual(field.default, NOT_PROVIDED)
# Test the database alteration
project_state.apps.get_model("test_adflpd", "pony").objects.create(
weight=4,
)
self.assertColumnNotExists("test_adflpd_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflpd", editor, project_state, new_state)
self.assertColumnExists("test_adflpd_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name", "preserve_default"])
def test_add_field_m2m(self):
"""
Tests the AddField operation with a ManyToManyField.
"""
project_state = self.set_up_test_model("test_adflmm", second_model=True)
# Test the state alteration
operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies"))
new_state = project_state.clone()
operation.state_forwards("test_adflmm", new_state)
self.assertEqual(len(new_state.models["test_adflmm", "pony"].fields), 4)
# Test the database alteration
self.assertTableNotExists("test_adflmm_pony_stables")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflmm", editor, project_state, new_state)
self.assertTableExists("test_adflmm_pony_stables")
self.assertColumnNotExists("test_adflmm_pony", "stables")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_adflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.stables.create()
self.assertEqual(p.stables.count(), 1)
p.stables.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adflmm", editor, new_state, project_state)
self.assertTableNotExists("test_adflmm_pony_stables")
def test_alter_field_m2m(self):
project_state = self.set_up_test_model("test_alflmm", second_model=True)
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies"))
])
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertFalse(Pony._meta.get_field('stables').blank)
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AlterField("Pony", "stables", models.ManyToManyField(to="Stable", related_name="ponies", blank=True))
])
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertTrue(Pony._meta.get_field('stables').blank)
def test_repoint_field_m2m(self):
project_state = self.set_up_test_model("test_alflmm", second_model=True, third_model=True)
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AddField("Pony", "places", models.ManyToManyField("Stable", related_name="ponies"))
])
Pony = project_state.apps.get_model("test_alflmm", "Pony")
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AlterField("Pony", "places", models.ManyToManyField(to="Van", related_name="ponies"))
])
# Ensure the new field actually works
Pony = project_state.apps.get_model("test_alflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.places.create()
self.assertEqual(p.places.count(), 1)
p.places.all().delete()
def test_remove_field_m2m(self):
project_state = self.set_up_test_model("test_rmflmm", second_model=True)
project_state = self.apply_operations("test_rmflmm", project_state, operations=[
migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies"))
])
self.assertTableExists("test_rmflmm_pony_stables")
with_field_state = project_state.clone()
operations = [migrations.RemoveField("Pony", "stables")]
project_state = self.apply_operations("test_rmflmm", project_state, operations=operations)
self.assertTableNotExists("test_rmflmm_pony_stables")
# And test reversal
self.unapply_operations("test_rmflmm", with_field_state, operations=operations)
self.assertTableExists("test_rmflmm_pony_stables")
def test_remove_field_m2m_with_through(self):
project_state = self.set_up_test_model("test_rmflmmwt", second_model=True)
self.assertTableNotExists("test_rmflmmwt_ponystables")
project_state = self.apply_operations("test_rmflmmwt", project_state, operations=[
migrations.CreateModel("PonyStables", fields=[
("pony", models.ForeignKey('test_rmflmmwt.Pony')),
("stable", models.ForeignKey('test_rmflmmwt.Stable')),
]),
migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies", through='test_rmflmmwt.PonyStables'))
])
self.assertTableExists("test_rmflmmwt_ponystables")
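        # The explicit through model owns the join table, so removing the M2M
        # field should leave that table untouched.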
operations = [migrations.RemoveField("Pony", "stables")]
self.apply_operations("test_rmflmmwt", project_state, operations=operations)
def test_remove_field(self):
"""
Tests the RemoveField operation.
"""
project_state = self.set_up_test_model("test_rmfl")
# Test the state alteration
operation = migrations.RemoveField("Pony", "pink")
self.assertEqual(operation.describe(), "Remove field pink from Pony")
new_state = project_state.clone()
operation.state_forwards("test_rmfl", new_state)
self.assertEqual(len(new_state.models["test_rmfl", "pony"].fields), 2)
# Test the database alteration
self.assertColumnExists("test_rmfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_rmfl", editor, project_state, new_state)
self.assertColumnNotExists("test_rmfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmfl", editor, new_state, project_state)
self.assertColumnExists("test_rmfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveField")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': "Pony", 'name': 'pink'})
def test_remove_fk(self):
"""
Tests the RemoveField operation on a foreign key.
"""
project_state = self.set_up_test_model("test_rfk", related_model=True)
self.assertColumnExists("test_rfk_rider", "pony_id")
operation = migrations.RemoveField("Rider", "pony")
new_state = project_state.clone()
operation.state_forwards("test_rfk", new_state)
with connection.schema_editor() as editor:
operation.database_forwards("test_rfk", editor, project_state, new_state)
self.assertColumnNotExists("test_rfk_rider", "pony_id")
with connection.schema_editor() as editor:
operation.database_backwards("test_rfk", editor, new_state, project_state)
self.assertColumnExists("test_rfk_rider", "pony_id")
def test_alter_model_table(self):
"""
Tests the AlterModelTable operation.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony_2")
self.assertEqual(operation.describe(), "Rename table for Pony to test_almota_pony_2")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(new_state.models["test_almota", "pony"].options["db_table"], "test_almota_pony_2")
# Test the database alteration
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableNotExists("test_almota_pony")
self.assertTableExists("test_almota_pony_2")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_almota", editor, new_state, project_state)
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelTable")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'table': "test_almota_pony_2"})
def test_alter_model_table_noop(self):
"""
Tests the AlterModelTable operation if the table name is not changed.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(new_state.models["test_almota", "pony"].options["db_table"], "test_almota_pony")
# Test the database alteration
self.assertTableExists("test_almota_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableExists("test_almota_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_almota", editor, new_state, project_state)
self.assertTableExists("test_almota_pony")
def test_alter_model_table_m2m(self):
"""
AlterModelTable should rename auto-generated M2M tables.
"""
app_label = "test_talflmltlm2m"
pony_db_table = 'pony_foo'
project_state = self.set_up_test_model(app_label, second_model=True, db_table=pony_db_table)
# Add the M2M field
first_state = project_state.clone()
operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable"))
operation.state_forwards(app_label, first_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, first_state)
original_m2m_table = "%s_%s" % (pony_db_table, "stables")
new_m2m_table = "%s_%s" % (app_label, "pony_stables")
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
# Rename the Pony db_table which should also rename the m2m table.
second_state = first_state.clone()
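        # Passing table=None resets db_table to the default
        # "<app_label>_<model_name>" name.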
operation = migrations.AlterModelTable(name='pony', table=None)
operation.state_forwards(app_label, second_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, first_state, second_state)
self.assertTableExists(new_m2m_table)
self.assertTableNotExists(original_m2m_table)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, second_state, first_state)
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
def test_alter_field(self):
"""
Tests the AlterField operation.
"""
project_state = self.set_up_test_model("test_alfl")
# Test the state alteration
operation = migrations.AlterField("Pony", "pink", models.IntegerField(null=True))
self.assertEqual(operation.describe(), "Alter field pink on Pony")
new_state = project_state.clone()
operation.state_forwards("test_alfl", new_state)
self.assertEqual(project_state.models["test_alfl", "pony"].get_field_by_name("pink").null, False)
self.assertEqual(new_state.models["test_alfl", "pony"].get_field_by_name("pink").null, True)
# Test the database alteration
self.assertColumnNotNull("test_alfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_alfl", editor, project_state, new_state)
self.assertColumnNull("test_alfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alfl", editor, new_state, project_state)
self.assertColumnNotNull("test_alfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_alter_field_pk(self):
"""
Tests the AlterField operation on primary keys (for things like PostgreSQL's SERIAL weirdness)
"""
project_state = self.set_up_test_model("test_alflpk")
# Test the state alteration
operation = migrations.AlterField("Pony", "id", models.IntegerField(primary_key=True))
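        # Replacing the AutoField with a plain IntegerField drops the
        # auto-increment behaviour (e.g. PostgreSQL's SERIAL sequence).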
new_state = project_state.clone()
operation.state_forwards("test_alflpk", new_state)
self.assertIsInstance(project_state.models["test_alflpk", "pony"].get_field_by_name("id"), models.AutoField)
self.assertIsInstance(new_state.models["test_alflpk", "pony"].get_field_by_name("id"), models.IntegerField)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alflpk", editor, project_state, new_state)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alflpk", editor, new_state, project_state)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_field_pk_fk(self):
"""
        Tests that the AlterField operation on primary keys changes any FKs pointing to it.
"""
project_state = self.set_up_test_model("test_alflpkfk", related_model=True)
# Test the state alteration
operation = migrations.AlterField("Pony", "id", models.FloatField(primary_key=True))
new_state = project_state.clone()
operation.state_forwards("test_alflpkfk", new_state)
self.assertIsInstance(project_state.models["test_alflpkfk", "pony"].get_field_by_name("id"), models.AutoField)
self.assertIsInstance(new_state.models["test_alflpkfk", "pony"].get_field_by_name("id"), models.FloatField)
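        # Helper: introspect both tables and check that the FK column's type
        # and null-ness match those of the primary key it points at.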
def assertIdTypeEqualsFkType():
with connection.cursor() as cursor:
id_type, id_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(cursor, "test_alflpkfk_pony")
if c.name == "id"
][0]
fk_type, fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(cursor, "test_alflpkfk_rider")
if c.name == "pony_id"
][0]
self.assertEqual(id_type, fk_type)
self.assertEqual(id_null, fk_null)
assertIdTypeEqualsFkType()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alflpkfk", editor, project_state, new_state)
assertIdTypeEqualsFkType()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alflpkfk", editor, new_state, project_state)
assertIdTypeEqualsFkType()
def test_rename_field(self):
"""
Tests the RenameField operation.
"""
project_state = self.set_up_test_model("test_rnfl", unique_together=True, index_together=True)
# Test the state alteration
operation = migrations.RenameField("Pony", "pink", "blue")
self.assertEqual(operation.describe(), "Rename field pink on Pony to blue")
new_state = project_state.clone()
operation.state_forwards("test_rnfl", new_state)
self.assertIn("blue", [n for n, f in new_state.models["test_rnfl", "pony"].fields])
self.assertNotIn("pink", [n for n, f in new_state.models["test_rnfl", "pony"].fields])
# Make sure the unique_together has the renamed column too
self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['unique_together'][0])
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['unique_together'][0])
# Make sure the index_together has the renamed column too
self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['index_together'][0])
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['index_together'][0])
# Test the database alteration
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnfl", editor, project_state, new_state)
self.assertColumnExists("test_rnfl_pony", "blue")
self.assertColumnNotExists("test_rnfl_pony", "pink")
# Ensure the unique constraint has been ported over
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_rnfl_pony")
# Ensure the index constraint has been ported over
self.assertIndexExists("test_rnfl_pony", ["weight", "blue"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rnfl", editor, new_state, project_state)
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
# Ensure the index constraint has been reset
self.assertIndexExists("test_rnfl_pony", ["weight", "pink"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameField")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': "Pony", 'old_name': "pink", 'new_name': "blue"})
def test_alter_unique_together(self):
"""
Tests the AlterUniqueTogether operation.
"""
project_state = self.set_up_test_model("test_alunto")
# Test the state alteration
operation = migrations.AlterUniqueTogether("Pony", [("pink", "weight")])
self.assertEqual(operation.describe(), "Alter unique_together for Pony (1 constraint(s))")
new_state = project_state.clone()
operation.state_forwards("test_alunto", new_state)
self.assertEqual(len(project_state.models["test_alunto", "pony"].options.get("unique_together", set())), 0)
self.assertEqual(len(new_state.models["test_alunto", "pony"].options.get("unique_together", set())), 1)
# Make sure we can insert duplicate rows
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
            # Test the database alteration
            with connection.schema_editor() as editor:
                operation.database_forwards("test_alunto", editor, project_state, new_state)
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            with self.assertRaises(IntegrityError):
                with atomic():
                    cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("DELETE FROM test_alunto_pony")
            # And test reversal
            with connection.schema_editor() as editor:
                operation.database_backwards("test_alunto", editor, new_state, project_state)
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("DELETE FROM test_alunto_pony")
# Test flat unique_together
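        # A flat tuple is normalized into a set containing a single tuple.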
operation = migrations.AlterUniqueTogether("Pony", ("pink", "weight"))
operation.state_forwards("test_alunto", new_state)
self.assertEqual(len(new_state.models["test_alunto", "pony"].options.get("unique_together", set())), 1)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterUniqueTogether")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'unique_together': {("pink", "weight")}})
def test_alter_unique_together_remove(self):
operation = migrations.AlterUniqueTogether("Pony", None)
self.assertEqual(operation.describe(), "Alter unique_together for Pony (0 constraint(s))")
def test_alter_index_together(self):
"""
Tests the AlterIndexTogether operation.
"""
project_state = self.set_up_test_model("test_alinto")
# Test the state alteration
operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
self.assertEqual(operation.describe(), "Alter index_together for Pony (1 constraint(s))")
new_state = project_state.clone()
operation.state_forwards("test_alinto", new_state)
self.assertEqual(len(project_state.models["test_alinto", "pony"].options.get("index_together", set())), 0)
self.assertEqual(len(new_state.models["test_alinto", "pony"].options.get("index_together", set())), 1)
# Make sure there's no matching index
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alinto", editor, project_state, new_state)
self.assertIndexExists("test_alinto_pony", ["pink", "weight"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alinto", editor, new_state, project_state)
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterIndexTogether")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'index_together': {("pink", "weight")}})
def test_alter_index_together_remove(self):
operation = migrations.AlterIndexTogether("Pony", None)
self.assertEqual(operation.describe(), "Alter index_together for Pony (0 constraint(s))")
def test_alter_model_options(self):
"""
Tests the AlterModelOptions operation.
"""
project_state = self.set_up_test_model("test_almoop")
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions("Pony", {"permissions": [("can_groom", "Can groom")]})
self.assertEqual(operation.describe(), "Change Meta options on Pony")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(len(project_state.models["test_almoop", "pony"].options.get("permissions", [])), 0)
self.assertEqual(len(new_state.models["test_almoop", "pony"].options.get("permissions", [])), 1)
self.assertEqual(new_state.models["test_almoop", "pony"].options["permissions"][0][0], "can_groom")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'options': {"permissions": [("can_groom", "Can groom")]}})
def test_alter_model_options_emptying(self):
"""
Tests that the AlterModelOptions operation removes keys from the dict (#23121)
"""
project_state = self.set_up_test_model("test_almoop", options=True)
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions("Pony", {})
self.assertEqual(operation.describe(), "Change Meta options on Pony")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(len(project_state.models["test_almoop", "pony"].options.get("permissions", [])), 1)
self.assertEqual(len(new_state.models["test_almoop", "pony"].options.get("permissions", [])), 0)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'options': {}})
def test_alter_order_with_respect_to(self):
"""
Tests the AlterOrderWithRespectTo operation.
"""
project_state = self.set_up_test_model("test_alorwrtto", related_model=True)
# Test the state alteration
operation = migrations.AlterOrderWithRespectTo("Rider", "pony")
self.assertEqual(operation.describe(), "Set order_with_respect_to on Rider to pony")
new_state = project_state.clone()
operation.state_forwards("test_alorwrtto", new_state)
self.assertEqual(project_state.models["test_alorwrtto", "rider"].options.get("order_with_respect_to", None), None)
self.assertEqual(new_state.models["test_alorwrtto", "rider"].options.get("order_with_respect_to", None), "pony")
        # Make sure there's no _order column yet
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# Create some rows before alteration
rendered_state = project_state.apps
pony = rendered_state.get_model("test_alorwrtto", "Pony").objects.create(weight=50)
rendered_state.get_model("test_alorwrtto", "Rider").objects.create(pony=pony, friend_id=1)
rendered_state.get_model("test_alorwrtto", "Rider").objects.create(pony=pony, friend_id=2)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alorwrtto", editor, project_state, new_state)
self.assertColumnExists("test_alorwrtto_rider", "_order")
# Check for correct value in rows
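        # The rows were created before the _order column existed, so both get
        # the column default (0) rather than sequential values.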
updated_riders = new_state.apps.get_model("test_alorwrtto", "Rider").objects.all()
self.assertEqual(updated_riders[0]._order, 0)
self.assertEqual(updated_riders[1]._order, 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alorwrtto", editor, new_state, project_state)
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterOrderWithRespectTo")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Rider", 'order_with_respect_to': "pony"})
def test_alter_model_managers(self):
"""
Tests that the managers on a model are set.
"""
project_state = self.set_up_test_model("test_almoma")
# Test the state alteration
operation = migrations.AlterModelManagers(
"Pony",
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
]
)
self.assertEqual(operation.describe(), "Change managers on Pony")
managers = project_state.models["test_almoma", "pony"].managers
self.assertEqual(managers, [])
new_state = project_state.clone()
operation.state_forwards("test_almoma", new_state)
self.assertIn(("test_almoma", "pony"), new_state.models)
managers = new_state.models["test_almoma", "pony"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
def test_alter_model_managers_emptying(self):
"""
        Tests that the managers on a model can be emptied.
"""
project_state = self.set_up_test_model("test_almomae", manager_model=True)
# Test the state alteration
operation = migrations.AlterModelManagers("Food", managers=[])
self.assertEqual(operation.describe(), "Change managers on Food")
self.assertIn(("test_almomae", "food"), project_state.models)
managers = project_state.models["test_almomae", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
new_state = project_state.clone()
operation.state_forwards("test_almomae", new_state)
managers = new_state.models["test_almomae", "food"].managers
self.assertEqual(managers, [])
def test_alter_fk(self):
"""
Tests that creating and then altering an FK works correctly
and deals with the pending SQL (#23091)
"""
project_state = self.set_up_test_model("test_alfk")
# Test adding and then altering the FK in one go
create_operation = migrations.CreateModel(
name="Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey(to="Pony")),
],
)
create_state = project_state.clone()
create_operation.state_forwards("test_alfk", create_state)
alter_operation = migrations.AlterField(
model_name='Rider',
name='pony',
field=models.ForeignKey(editable=False, to="Pony"),
)
alter_state = create_state.clone()
alter_operation.state_forwards("test_alfk", alter_state)
with connection.schema_editor() as editor:
create_operation.database_forwards("test_alfk", editor, project_state, create_state)
alter_operation.database_forwards("test_alfk", editor, create_state, alter_state)
def test_alter_fk_non_fk(self):
"""
Tests that altering an FK to a non-FK works (#23244)
"""
# Test the state alteration
operation = migrations.AlterField(
model_name="Rider",
name="pony",
field=models.FloatField(),
)
project_state, new_state = self.make_test_state("test_afknfk", operation, related_model=True)
# Test the database alteration
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_afknfk", editor, project_state, new_state)
self.assertColumnExists("test_afknfk_rider", "pony")
self.assertColumnNotExists("test_afknfk_rider", "pony_id")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_afknfk", editor, new_state, project_state)
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
def test_run_sql(self):
"""
Tests the RunSQL operation.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
# Use a multi-line string with a comment to test splitting on SQLite and MySQL respectively
"CREATE TABLE i_love_ponies (id int, special_thing varchar(15));\n"
"INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'i love ponies'); -- this is magic!\n"
"INSERT INTO i_love_ponies (id, special_thing) VALUES (2, 'i love django');\n"
"UPDATE i_love_ponies SET special_thing = 'Ponies' WHERE special_thing LIKE '%%ponies';"
"UPDATE i_love_ponies SET special_thing = 'Django' WHERE special_thing LIKE '%django';",
# Run delete queries to test for parameter substitution failure
# reported in #23426
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%Django%';"
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%%Ponies%%';"
"DROP TABLE i_love_ponies",
state_operations=[migrations.CreateModel("SomethingElse", [("id", models.AutoField(primary_key=True))])],
)
self.assertEqual(operation.describe(), "Raw SQL operation")
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_runsql", new_state)
self.assertEqual(len(new_state.models["test_runsql", "somethingelse"].fields), 1)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test SQL collection
with connection.schema_editor(collect_sql=True) as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertIn("LIKE '%%ponies';", "\n".join(editor.collected_sql))
operation.database_backwards("test_runsql", editor, project_state, new_state)
self.assertIn("LIKE '%%Ponies%%';", "\n".join(editor.collected_sql))
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertTableExists("i_love_ponies")
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 2)
cursor.execute("SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Django'")
self.assertEqual(cursor.fetchall()[0][0], 1)
cursor.execute("SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Ponies'")
self.assertEqual(cursor.fetchall()[0][0], 1)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards("test_runsql", editor, new_state, project_state)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunSQL")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["reverse_sql", "sql", "state_operations"])
def test_run_sql_params(self):
"""
#23426 - RunSQL should accept parameters.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
["CREATE TABLE i_love_ponies (id int, special_thing varchar(15));"],
["DROP TABLE i_love_ponies"],
)
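        # RunSQL accepts each statement as a plain string, a [sql, params]
        # list, or a (sql, params) tuple; params may be None for no parameters.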
param_operation = migrations.RunSQL(
# forwards
(
"INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'Django');",
["INSERT INTO i_love_ponies (id, special_thing) VALUES (2, %s);", ['Ponies']],
("INSERT INTO i_love_ponies (id, special_thing) VALUES (%s, %s);", (3, 'Python',)),
),
# backwards
[
"DELETE FROM i_love_ponies WHERE special_thing = 'Django';",
["DELETE FROM i_love_ponies WHERE special_thing = 'Ponies';", None],
("DELETE FROM i_love_ponies WHERE id = %s OR special_thing = %s;", [3, 'Python']),
]
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
new_state = project_state.clone()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
# Test parameter passing
with connection.schema_editor() as editor:
param_operation.database_forwards("test_runsql", editor, project_state, new_state)
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 3)
with connection.schema_editor() as editor:
param_operation.database_backwards("test_runsql", editor, new_state, project_state)
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_runsql", editor, new_state, project_state)
self.assertTableNotExists("i_love_ponies")
def test_run_sql_params_invalid(self):
"""
        #23426 - RunSQL should fail when a parameterized statement is not
        given as a 2-element (sql, params) sequence.
"""
project_state = self.set_up_test_model("test_runsql")
new_state = project_state.clone()
operation = migrations.RunSQL(
# forwards
[
["INSERT INTO foo (bar) VALUES ('buz');"]
],
# backwards
(
("DELETE FROM foo WHERE bar = 'buz';", 'invalid', 'parameter count'),
),
)
with connection.schema_editor() as editor:
six.assertRaisesRegex(self, ValueError,
"Expected a 2-tuple but got 1",
operation.database_forwards,
"test_runsql", editor, project_state, new_state)
with connection.schema_editor() as editor:
six.assertRaisesRegex(self, ValueError,
"Expected a 2-tuple but got 3",
operation.database_backwards,
"test_runsql", editor, new_state, project_state)
def test_run_sql_noop(self):
"""
#24098 - Tests no-op RunSQL operations.
"""
operation = migrations.RunSQL(migrations.RunSQL.noop, migrations.RunSQL.noop)
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, None, None)
operation.database_backwards("test_runsql", editor, None, None)
def test_run_python(self):
"""
Tests the RunPython operation
"""
project_state = self.set_up_test_model("test_runpython", mti_model=True)
# Create the operation
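        # RunPython hands the callable the historical apps registry (bound here
        # to the `models` argument) plus the schema editor, so models must be
        # looked up via get_model() rather than imported directly.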
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.create(pink=1, weight=3.55)
Pony.objects.create(weight=5)
def inner_method_reverse(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.filter(pink=1, weight=3.55).delete()
Pony.objects.filter(weight=5).delete()
operation = migrations.RunPython(inner_method, reverse_code=inner_method_reverse)
self.assertEqual(operation.describe(), "Raw Python operation")
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards("test_runpython", new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2)
# Now test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0)
# Now test we can't use a string
with self.assertRaises(ValueError):
migrations.RunPython("print 'ahahaha'")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code", "reverse_code"])
# Also test reversal fails, with an operation identical to above but without reverse_code set
no_reverse_operation = migrations.RunPython(inner_method)
self.assertFalse(no_reverse_operation.reversible)
with connection.schema_editor() as editor:
no_reverse_operation.database_forwards("test_runpython", editor, project_state, new_state)
with self.assertRaises(NotImplementedError):
no_reverse_operation.database_backwards("test_runpython", editor, new_state, project_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2)
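        # A forwards-only RunPython whose code makes its own assertions;
        # its deconstruction records just "code" (checked below)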
def create_ponies(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
pony1 = Pony.objects.create(pink=1, weight=3.55)
self.assertIsNot(pony1.pk, None)
pony2 = Pony.objects.create(weight=5)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_ponies)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 4)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code"])
def create_shetlandponies(models, schema_editor):
ShetlandPony = models.get_model("test_runpython", "ShetlandPony")
pony1 = ShetlandPony.objects.create(weight=4.0)
self.assertIsNot(pony1.pk, None)
pony2 = ShetlandPony.objects.create(weight=5.0)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_shetlandponies)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 6)
self.assertEqual(project_state.apps.get_model("test_runpython", "ShetlandPony").objects.count(), 2)
def test_run_python_atomic(self):
"""
        Tests that the RunPython operation correctly handles the "atomic" keyword
"""
project_state = self.set_up_test_model("test_runpythonatomic", mti_model=True)
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpythonatomic", "Pony")
Pony.objects.create(pink=1, weight=3.55)
raise ValueError("Adrian hates ponies.")
atomic_migration = Migration("test", "test_runpythonatomic")
atomic_migration.operations = [migrations.RunPython(inner_method)]
non_atomic_migration = Migration("test", "test_runpythonatomic")
non_atomic_migration.operations = [migrations.RunPython(inner_method, atomic=False)]
# If we're a fully-transactional database, both versions should rollback
if connection.features.can_rollback_ddl:
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
# Otherwise, the non-atomic operation should leave a row there
else:
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 1)
# And deconstruction
definition = non_atomic_migration.operations[0].deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["atomic", "code"])
def test_run_python_related_assignment(self):
"""
#24282 - Tests that model changes to a FK reverse side update the model
on the FK side as well.
"""
def inner_method(models, schema_editor):
Author = models.get_model("test_authors", "Author")
Book = models.get_model("test_books", "Book")
author = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author)
create_author = migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_book = migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=100)),
("author", models.ForeignKey("test_authors.Author"))
],
options={},
)
add_hometown = migrations.AddField(
"Author",
"hometown",
models.CharField(max_length=100),
)
create_old_man = migrations.RunPython(inner_method, inner_method)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_author.state_forwards("test_authors", new_state)
create_author.database_forwards("test_authors", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_book.state_forwards("test_books", new_state)
create_book.database_forwards("test_books", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
add_hometown.state_forwards("test_authors", new_state)
add_hometown.database_forwards("test_authors", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_old_man.state_forwards("test_books", new_state)
create_old_man.database_forwards("test_books", editor, project_state, new_state)
def test_run_python_noop(self):
"""
#24098 - Tests no-op RunPython operations.
"""
project_state = ProjectState()
new_state = project_state.clone()
operation = migrations.RunPython(migrations.RunPython.noop, migrations.RunPython.noop)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
operation.database_backwards("test_runpython", editor, new_state, project_state)
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
def test_separate_database_and_state(self):
"""
Tests the SeparateDatabaseAndState operation.
"""
project_state = self.set_up_test_model("test_separatedatabaseandstate")
# Create the operation
database_operation = migrations.RunSQL(
"CREATE TABLE i_love_ponies (id int, special_thing int);",
"DROP TABLE i_love_ponies;"
)
state_operation = migrations.CreateModel("SomethingElse", [("id", models.AutoField(primary_key=True))])
operation = migrations.SeparateDatabaseAndState(
state_operations=[state_operation],
database_operations=[database_operation]
)
self.assertEqual(operation.describe(), "Custom state/database change combination")
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_separatedatabaseandstate", new_state)
self.assertEqual(len(new_state.models["test_separatedatabaseandstate", "somethingelse"].fields), 1)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_separatedatabaseandstate", editor, project_state, new_state)
self.assertTableExists("i_love_ponies")
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards("test_separatedatabaseandstate", editor, new_state, project_state)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "SeparateDatabaseAndState")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["database_operations", "state_operations"])
class SwappableOperationTests(OperationTestBase):
"""
Tests that key operations ignore swappable models
(we don't want to replicate all of them here, as the functionality
is in a common base class anyway)
"""
available_apps = [
"migrations",
"django.contrib.auth",
"django.contrib.contenttypes",
]
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_create_ignore_swapped(self):
"""
        Tests that the CreateModel operation ignores swapped models.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
options={
"swappable": "TEST_SWAP_MODEL",
},
)
# Test the state alteration (it should still be there!)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crigsw", new_state)
self.assertEqual(new_state.models["test_crigsw", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crigsw", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crigsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crigsw", editor, project_state, new_state)
self.assertTableNotExists("test_crigsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crigsw", editor, new_state, project_state)
self.assertTableNotExists("test_crigsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_delete_ignore_swapped(self):
"""
        Tests that the DeleteModel operation ignores swapped models.
"""
operation = migrations.DeleteModel("Pony")
project_state, new_state = self.make_test_state("test_dligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_dligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dligsw", editor, project_state, new_state)
self.assertTableNotExists("test_dligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dligsw", editor, new_state, project_state)
self.assertTableNotExists("test_dligsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_add_field_ignore_swapped(self):
"""
        Tests that the AddField operation ignores swapped models.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
project_state, new_state = self.make_test_state("test_adfligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_adfligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_adfligsw", editor, project_state, new_state)
self.assertTableNotExists("test_adfligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adfligsw", editor, new_state, project_state)
self.assertTableNotExists("test_adfligsw_pony")
| bsd-3-clause |
Changaco/oh-mainline | vendor/packages/gdata/tests/gdata_tests/calendar_resource/data_test.py | 41 | 3292 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Vic Fryzel <[email protected]>'
import unittest
import atom.core
from gdata import test_data
import gdata.calendar_resource.data
import gdata.test_config as conf
class CalendarResourceEntryTest(unittest.TestCase):
def setUp(self):
self.entry = atom.core.parse(test_data.CALENDAR_RESOURCE_ENTRY,
gdata.calendar_resource.data.CalendarResourceEntry)
self.feed = atom.core.parse(test_data.CALENDAR_RESOURCES_FEED,
gdata.calendar_resource.data.CalendarResourceFeed)
def testCalendarResourceEntryFromString(self):
self.assert_(isinstance(self.entry,
gdata.calendar_resource.data.CalendarResourceEntry))
self.assertEquals(self.entry.resource_id, 'CR-NYC-14-12-BR')
self.assertEquals(self.entry.resource_common_name, 'Boardroom')
self.assertEquals(self.entry.resource_description,
('This conference room is in New York City, building 14, floor 12, '
'Boardroom'))
self.assertEquals(self.entry.resource_type, 'CR')
def testCalendarResourceFeedFromString(self):
self.assertEquals(len(self.feed.entry), 2)
self.assert_(isinstance(self.feed,
gdata.calendar_resource.data.CalendarResourceFeed))
self.assert_(isinstance(self.feed.entry[0],
gdata.calendar_resource.data.CalendarResourceEntry))
self.assert_(isinstance(self.feed.entry[1],
gdata.calendar_resource.data.CalendarResourceEntry))
self.assertEquals(
self.feed.entry[0].find_edit_link(),
'https://apps-apis.google.com/feeds/calendar/resource/2.0/yourdomain.com/CR-NYC-14-12-BR')
self.assertEquals(self.feed.entry[0].resource_id, 'CR-NYC-14-12-BR')
self.assertEquals(self.feed.entry[0].resource_common_name, 'Boardroom')
self.assertEquals(self.feed.entry[0].resource_description,
('This conference room is in New York City, building 14, floor 12, '
'Boardroom'))
self.assertEquals(self.feed.entry[0].resource_type, 'CR')
self.assertEquals(self.feed.entry[1].resource_id,
'(Bike)-London-43-Lobby-Bike-1')
self.assertEquals(self.feed.entry[1].resource_common_name, 'London bike-1')
self.assertEquals(self.feed.entry[1].resource_description,
'Bike is in London at building 43\'s lobby.')
self.assertEquals(self.feed.entry[1].resource_type, '(Bike)')
self.assertEquals(
self.feed.entry[1].find_edit_link(),
'https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/(Bike)-London-43-Lobby-Bike-1')
def suite():
return conf.build_suite([CalendarResourceEntryTest])
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
flavour/eden | modules/templates/SHARE/config.py | 1 | 118243 | # -*- coding: utf-8 -*-
from collections import OrderedDict
from gluon import current, URL
from gluon.storage import Storage
from s3 import S3ReportRepresent
def config(settings):
"""
Settings for the SHARE Template
Migration Issues:
req_need.name is now length=64
            (SHARE can use req_need.description instead if the notnull=True is removed)
"""
T = current.T
settings.base.system_name = T("Humanitarian Country Team (HCT) Relief and Rehabilitation System")
settings.base.system_name_short = T("SHARE")
# UI Settings
settings.ui.menu_logo = URL(c = "static",
f = "themes",
args = ["SHARE", "img", "sharemenulogo.png"],
)
# PrePopulate data
settings.base.prepopulate += ("SHARE",)
# Theme (folder to use for views/layout.html)
settings.base.theme = "SHARE"
# Authentication settings
# Should users be allowed to register themselves?
#settings.security.self_registration = False
# Do new users need to verify their email address?
#settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
#settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
#settings.auth.registration_organisation_required = True
#settings.auth.registration_requests_site = True
settings.auth.registration_link_user_to = {"staff": T("Staff"),
"volunteer": T("Volunteer"),
#"member": T("Member")
}
def registration_organisation_default(default):
auth = current.auth
has_role = auth.s3_has_role
if has_role("ORG_ADMIN") and not has_role("ADMIN"):
return auth.user.organisation_id
else:
return default
settings.auth.registration_organisation_default = registration_organisation_default
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
#settings.gis.countries = ("US",)
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to Disable the Postcode selector in the LocationSelector
#settings.gis.postcode_selector = False # @ToDo: Vary by country (include in the gis_config!)
# Uncomment to show the Print control:
# http://eden.sahanafoundation.org/wiki/UserGuidelines/Admin/MapPrinting
#settings.gis.print_button = True
# L10n settings
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
# 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations
settings.security.policy = 6 # Controller, Function, Table ACLs and Entity Realm
# Don't show version info on About page
settings.security.version_info = False
# UI Settings
settings.ui.datatables_responsive = False
settings.ui.datatables_double_scroll = True
# Disable permalink
settings.ui.label_permalink = None
# Default summary pages:
settings.ui.summary = ({"common": True,
"name": "add",
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}],
},
)
# -------------------------------------------------------------------------
# CMS Content Management
#
settings.cms.bookmarks = True
settings.cms.richtext = True
settings.cms.show_tags = True
# -------------------------------------------------------------------------
# Events
settings.event.label = "Disaster"
# Uncomment to not use Incidents under Events
settings.event.incident = False
# -------------------------------------------------------------------------
# Messaging
settings.msg.parser = "SAMBRO" # for parse_tweet
# -------------------------------------------------------------------------
# Organisations
settings.org.sector = True
# Show Organisation Types in the rheader
settings.org.organisation_type_rheader = True
# -------------------------------------------------------------------------
# Projects
# Don't use Beneficiaries
settings.project.activity_beneficiaries = False
# Don't use Item Catalog for Distributions
settings.project.activity_items = False
settings.project.activity_sectors = True
# Links to Filtered Components for Donors & Partners
settings.project.organisation_roles = {
1: T("Organization"),
2: T("Implementing Partner"),
3: T("Donor"),
}
# -------------------------------------------------------------------------
# Supply
# Disable the use of Multiple Item Catalogs
settings.supply.catalog_multi = False
# -------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = "Ticket Viewer",
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("setup", Storage(
name_nice = T("Setup"),
#description = "WebSetup",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # No Menu
)),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
#("tour", Storage(
# name_nice = T("Guided Tour Functionality"),
# module_type = None,
#)),
("translate", Storage(
name_nice = T("Translation Functionality"),
#description = "Selective translation of strings based on module.",
module_type = None,
)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = "Person Registry",
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 1
)),
("hrm", Storage(
name_nice = "Staff",
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
("cms", Storage(
name_nice = "Content Management",
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = 10,
)),
("msg", Storage(
name_nice = "Messaging",
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
            # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
module_type = None,
)),
("supply", Storage(
name_nice = "Supply Chain Management",
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
restricted = True,
module_type = 4
)),
("asset", Storage(
name_nice = "Assets",
#description = "Recording and Assigning Assets",
restricted = True,
module_type = 5,
)),
# Vehicle depends on Assets
#("vehicle", Storage(
# name_nice = "Vehicles",
# #description = "Manage Vehicles",
# restricted = True,
# module_type = 10,
#)),
("req", Storage(
name_nice = "Requests",
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = 10,
)),
# Used just for Statuses
("project", Storage(
name_nice = "Tasks",
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
module_type = 2
)),
#("cr", Storage(
# name_nice = T("Shelters"),
# #description = "Tracks the location, capacity and breakdown of victims in Shelters",
# restricted = True,
# module_type = 10
#)),
#("hms", Storage(
# name_nice = T("Hospitals"),
# #description = "Helps to monitor status of hospitals",
# restricted = True,
# module_type = 10
#)),
#("dvr", Storage(
# name_nice = T("Disaster Victim Registry"),
# #description = "Allow affected individuals & households to register to receive compensation and distributions",
# restricted = True,
# module_type = 10,
#)),
("event", Storage(
name_nice = "Events",
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = 10,
)),
#("transport", Storage(
# name_nice = T("Transport"),
# restricted = True,
# module_type = 10,
#)),
("stats", Storage(
name_nice = T("Statistics"),
#description = "Manages statistics",
restricted = True,
module_type = None,
)),
])
# -------------------------------------------------------------------------
def customise_cms_post_resource(r, tablename):
import json
from s3 import S3SQLCustomForm, S3SQLInlineComponent, \
S3DateFilter, S3OptionsFilter, S3TextFilter, \
s3_fieldmethod
s3db = current.s3db
# Virtual Field for Comments
# - otherwise need to do per-record DB calls inside cms_post_list_layout
        #   as direct list_fields come in unsorted, so they can't be matched up to records
ctable = s3db.cms_comment
def comment_as_json(row):
body = row["cms_comment.body"]
if not body:
return None
return json.dumps({"body": body,
"created_by": row["cms_comment.created_by"],
"created_on": row["cms_comment.created_on"].isoformat(),
})
ctable.json_dump = s3_fieldmethod("json_dump",
comment_as_json,
# over-ride the default represent of s3_unicode to prevent HTML being rendered too early
#represent = lambda v: v,
)
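        # Illustrative shape of the serialised value for a single comment row:
        # {"body": "...", "created_by": 1, "created_on": "2019-06-01T10:15:00"}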
s3db.configure("cms_comment",
extra_fields = ["body",
"created_by",
"created_on",
],
# Doesn't seem to have any impact
#orderby = "cms_comment.created_on asc",
)
table = s3db.cms_post
table.priority.readable = table.priority.writable = True
#table.series_id.readable = table.series_id.writable = True
#table.status_id.readable = table.status_id.writable = True
crud_form = S3SQLCustomForm(#(T("Type"), "series_id"),
(T("Priority"), "priority"),
#(T("Status"), "status_id"),
(T("Title"), "title"),
(T("Text"), "body"),
#(T("Location"), "location_id"),
# Tags are added client-side
S3SQLInlineComponent("document",
name = "file",
label = T("Files"),
fields = [("", "file"),
#"comments",
],
),
)
date_filter = S3DateFilter("date",
# If we introduce an end_date on Posts:
#["date", "end_date"],
label = "",
#hide_time = True,
#slider = True,
clear_text = "X",
)
date_filter.input_labels = {"ge": "Start Time/Date", "le": "End Time/Date"}
filter_widgets = [S3TextFilter(["body",
],
#formstyle = text_filter_formstyle,
label = T("Search"),
_placeholder = T("Enter search term…"),
),
#S3OptionsFilter("series_id",
# label = "",
# noneSelectedText = "Type", # T() added in widget
# no_opts = "",
# ),
S3OptionsFilter("priority",
label = "",
noneSelectedText = "Priority", # T() added in widget
no_opts = "",
),
#S3OptionsFilter("status_id",
# label = "",
# noneSelectedText = "Status", # T() added in widget
# no_opts = "",
# ),
S3OptionsFilter("created_by$organisation_id",
label = "",
noneSelectedText = "Source", # T() added in widget
no_opts = "",
),
S3OptionsFilter("tag_post.tag_id",
label = "",
noneSelectedText = "Tag", # T() added in widget
no_opts = "",
),
date_filter,
]
from templates.SHARE.controllers import cms_post_list_layout
s3db.configure("cms_post",
create_next = URL(args = [1, "post", "datalist"]),
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = [#"series_id",
"priority",
#"status_id",
"date",
"title",
"body",
"created_by",
"tag.name",
"document.file",
"comment.json_dump",
],
list_layout = cms_post_list_layout,
)
settings.customise_cms_post_resource = customise_cms_post_resource
# -------------------------------------------------------------------------
def customise_event_sitrep_resource(r, tablename):
from s3 import s3_comments_widget
table = current.s3db.event_sitrep
table.name.widget = lambda f, v: \
s3_comments_widget(f, v, _placeholder = "Please provide a brief summary of the Situational Update you are submitting.")
table.comments.comment = None
table.comments.widget = lambda f, v: \
s3_comments_widget(f, v, _placeholder = "e.g. Any additional relevant information.")
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Situational Update"),
title_display = T("HCT Activity and Response Report"),
title_list = T("Situational Updates"),
title_update = T("Edit Situational Update"),
title_upload = T("Import Situational Updates"),
label_list_button = T("List Situational Updates"),
label_delete_button = T("Delete Situational Update"),
msg_record_created = T("Situational Update added"),
msg_record_modified = T("Situational Update updated"),
msg_record_deleted = T("Situational Update deleted"),
msg_list_empty = T("No Situational Updates currently registered"))
settings.customise_event_sitrep_resource = customise_event_sitrep_resource
    # -------------------------------------------------------------------------
def customise_event_sitrep_controller(**attr):
s3 = current.response.s3
# Custom postp
standard_postp = s3.postp
def postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive:
# Mark this page to have differential CSS
s3.jquery_ready.append('''$('main').attr('id', 'sitrep')''')
return output
s3.postp = postp
# Extend the width of the Summary column
dt_col_widths = {0: 110,
1: 95,
2: 100,
3: 100,
4: 100,
5: 100,
6: 110,
7: 80,
8: 90,
9: 300,
10: 110,
}
if "dtargs" in attr:
attr["dtargs"]["dt_col_widths"] = dt_col_widths
else:
attr["dtargs"] = {"dt_col_widths": dt_col_widths,
}
return attr
settings.customise_event_sitrep_controller = customise_event_sitrep_controller
    # -------------------------------------------------------------------------
def customise_gis_location_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.representation == "json":
# Special filter vars to find child locations while
# including the parent location in the JSON result:
# adm => the parent location ID
# l => the target Lx level for child locations
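                # Example request (illustrative):
                #   gis/location.json?adm=4&l=L2
                # => location #4 itself plus all of its L2 children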
get_vars = r.get_vars
adm = get_vars.get("adm")
if adm:
from s3 import FS
resource = r.resource
# Filter for children of adm
query = FS("parent") == adm
# Restrict children to a certain Lx level
level = get_vars.get("l")
if level:
q = FS("level") == level
query = (query & q) if query else q
# Always include adm
query = (FS("id") == adm) | query
resource.add_filter(query)
# Push the parent to top of the list + alpha-sort
table = resource.table
resource.configure(orderby = (table.level, table.name))
return result
s3.prep = custom_prep
return attr
settings.customise_gis_location_controller = customise_gis_location_controller
# -------------------------------------------------------------------------
def customise_msg_twitter_channel_resource(r, tablename):
s3db = current.s3db
def onaccept(form):
# Normal onaccept
s3db.msg_channel_onaccept(form)
_id = form.vars.id
db = current.db
table = db.msg_twitter_channel
channel_id = db(table.id == _id).select(table.channel_id,
limitby=(0, 1)).first().channel_id
# Link to Parser
table = s3db.msg_parser
_id = table.insert(channel_id=channel_id, function_name="parse_tweet", enabled=True)
s3db.msg_parser_enable(_id)
run_async = current.s3task.run_async
# Poll
run_async("msg_poll", args=["msg_twitter_channel", channel_id])
# Parse
run_async("msg_parse", args=[channel_id, "parse_tweet"])
s3db.configure(tablename,
create_onaccept = onaccept,
)
settings.customise_msg_twitter_channel_resource = customise_msg_twitter_channel_resource
# -------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
s3db = current.s3db
# Custom Components
s3db.add_components(tablename,
org_organisation_tag = (# Request Number
{"name": "req_number",
"joinby": "organisation_id",
"filterby": {"tag": "req_number",
},
"multiple": False,
},
# Vision
{"name": "vision",
"joinby": "organisation_id",
"filterby": {"tag": "vision",
},
"multiple": False,
},
),
)
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink, s3_comments_widget
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
vision = components_get("vision")
vision.table.value.widget = s3_comments_widget
crud_form = S3SQLCustomForm("name",
"acronym",
S3SQLInlineLink("organisation_type",
field = "organisation_type_id",
                                                    # The search box kicks in by default at just 10 options, which adds unnecessary
                                                    # complexity to a commonly-used form & commonly an early one (create Org when registering)
search = False,
label = T("Type"),
multiple = False,
widget = "multiselect",
),
S3SQLInlineLink("sector",
columns = 4,
field = "sector_id",
label = T("Sectors"),
),
#S3SQLInlineLink("service",
# columns = 4,
# field = "service_id",
# label = T("Services"),
# ),
"country",
"phone",
"website",
"logo",
(T("About"), "comments"),
S3SQLInlineComponent("vision",
label = T("Vision"),
fields = [("", "value")],
multiple = False,
),
S3SQLInlineComponent("req_number",
label = T("Request Number"),
fields = [("", "value")],
multiple = False,
),
)
s3db.configure(tablename,
crud_form = crud_form,
)
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -------------------------------------------------------------------------
def customise_org_sector_controller(**attr):
s3db = current.s3db
tablename = "org_sector"
# Just 1 set of sectors / sector leads nationally
# @ToDo: Deployment Setting
#f = s3db.org_sector.location_id
#f.readable = f.writable = False
# Custom Component for Sector Leads
s3db.add_components(tablename,
org_sector_organisation = {"name": "sector_lead",
"joinby": "sector_id",
"filterby": {"lead": True,
},
},
)
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm("name",
"abrv",
"comments",
S3SQLInlineComponent("sector_lead",
label = T("Lead Organization(s)"),
fields = [("", "organisation_id"),],
),
)
s3db.configure(tablename,
crud_form = crud_form,
list_fields = ["name",
"abrv",
(T("Lead Organization(s)"), "sector_lead.organisation_id"),
],
)
return attr
settings.customise_org_sector_controller = customise_org_sector_controller
# -------------------------------------------------------------------------
def customise_pr_forum_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
s3db.pr_forum
s3.crud_strings["pr_forum"].title_display = T("HCT Coordination Folders")
s3.dl_no_header = True
# Comments
appname = current.request.application
s3.scripts.append("/%s/static/themes/WACOP/js/update_comments.js" % appname)
script = '''S3.wacop_comments()
S3.redraw_fns.push('wacop_comments')'''
s3.jquery_ready.append(script)
# Tags for Updates
if s3.debug:
s3.scripts.append("/%s/static/scripts/tag-it.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/tag-it.min.js" % appname)
if current.auth.s3_has_permission("update", s3db.cms_tag_post):
# @ToDo: Move the ajaxUpdateOptions into callback of getS3?
readonly = '''afterTagAdded:function(event,ui){
if(ui.duringInitialization){return}
var post_id=$(this).attr('data-post_id')
var url=S3.Ap.concat('/cms/post/',post_id,'/add_tag/',ui.tagLabel)
$.getS3(url)
S3.search.ajaxUpdateOptions('#datalist-filter-form')
},afterTagRemoved:function(event,ui){
var post_id=$(this).attr('data-post_id')
var url=S3.Ap.concat('/cms/post/',post_id,'/remove_tag/',ui.tagLabel)
$.getS3(url)
S3.search.ajaxUpdateOptions('#datalist-filter-form')
},'''
else:
readonly = '''readOnly:true'''
script = \
'''S3.tagit=function(){$('.s3-tags').tagit({placeholderText:'%s',autocomplete:{source:'%s'},%s})}
S3.tagit()
S3.redraw_fns.push('tagit')''' % (T("Add tags here…"),
URL(c="cms", f="tag",
args="tag_list.json"),
readonly)
s3.jquery_ready.append(script)
attr["rheader"] = None
attr["hide_filter"] = False
return attr
settings.customise_pr_forum_controller = customise_pr_forum_controller
# -------------------------------------------------------------------------
def req_need_commit(r, **attr):
"""
Custom method to Commit to a Need by creating an Activity Group
"""
# Create Activity Group (Response) with values from Need
need_id = r.id
db = current.db
s3db = current.s3db
ntable = s3db.req_need
ntable_id = ntable.id
netable = s3db.event_event_need
left = [netable.on(netable.need_id == ntable_id),
]
need = db(ntable_id == need_id).select(ntable.name,
ntable.location_id,
netable.event_id,
left = left,
limitby = (0, 1)
).first()
nttable = s3db.req_need_tag
query = (nttable.need_id == need_id) & \
(nttable.tag.belongs(("address", "contact"))) & \
(nttable.deleted == False)
tags = db(query).select(nttable.tag,
nttable.value,
)
contact = address = None
for tag in tags:
if tag.tag == "address":
address = tag.value
elif tag.tag == "contact":
contact = tag.value
nrtable = s3db.req_need_response
need_response_id = nrtable.insert(need_id = need_id,
name = need["req_need.name"],
location_id = need["req_need.location_id"],
contact = contact,
address = address,
)
organisation_id = current.auth.user.organisation_id
if organisation_id:
s3db.req_need_response_organisation.insert(need_response_id = need_response_id,
organisation_id = organisation_id,
role = 1,
)
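            # role=1 is presumably the lead "Organization" role
            # (cf. settings.project.organisation_roles above)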
event_id = need["event_event_need.event_id"]
if event_id:
aetable = s3db.event_event_need_response
aetable.insert(need_response_id = need_response_id,
event_id = event_id,
)
nltable = s3db.req_need_line
query = (nltable.need_id == need_id) & \
(nltable.deleted == False)
lines = db(query).select(nltable.id,
nltable.coarse_location_id,
nltable.location_id,
nltable.sector_id,
nltable.parameter_id,
nltable.value,
nltable.value_uncommitted,
nltable.item_category_id,
nltable.item_id,
nltable.item_pack_id,
nltable.quantity,
nltable.quantity_uncommitted,
nltable.status,
)
if lines:
linsert = s3db.req_need_response_line.insert
for line in lines:
value_uncommitted = line.value_uncommitted
if value_uncommitted is None:
# No commitments yet so commit to all
value = line.value
else:
# Only commit to the remainder
value = value_uncommitted
quantity_uncommitted = line.quantity_uncommitted
if quantity_uncommitted is None:
# No commitments yet so commit to all
quantity = line.quantity
else:
# Only commit to the remainder
quantity = quantity_uncommitted
need_line_id = line.id
linsert(need_response_id = need_response_id,
need_line_id = need_line_id,
coarse_location_id = line.coarse_location_id,
location_id = line.location_id,
sector_id = line.sector_id,
parameter_id = line.parameter_id,
value = value,
item_category_id = line.item_category_id,
item_id = line.item_id,
item_pack_id = line.item_pack_id,
quantity = quantity,
)
# Update Need Line status
req_need_line_status_update(need_line_id)
# Redirect to Update
from gluon import redirect
        redirect(URL(c="req", f="need_response",
args = [need_response_id, "update"],
))
# -------------------------------------------------------------------------
def req_need_line_commit(r, **attr):
"""
Custom method to Commit to a Need Line by creating an Activity
"""
# Create Activity with values from Need Line
need_line_id = r.id
db = current.db
s3db = current.s3db
nltable = s3db.req_need_line
query = (nltable.id == need_line_id)
line = db(query).select(nltable.id,
nltable.need_id,
nltable.coarse_location_id,
nltable.location_id,
nltable.sector_id,
nltable.parameter_id,
nltable.value,
nltable.value_uncommitted,
nltable.item_category_id,
nltable.item_id,
nltable.item_pack_id,
nltable.quantity,
nltable.quantity_uncommitted,
nltable.status,
limitby = (0, 1)
).first()
need_id = line.need_id
ntable = s3db.req_need
ntable_id = ntable.id
netable = s3db.event_event_need
left = [netable.on(netable.need_id == ntable_id),
]
need = db(ntable_id == need_id).select(ntable.name,
ntable.location_id,
netable.event_id,
left = left,
limitby = (0, 1)
).first()
nttable = s3db.req_need_tag
query = (nttable.need_id == need_id) & \
(nttable.tag.belongs(("address", "contact"))) & \
(nttable.deleted == False)
tags = db(query).select(nttable.tag,
nttable.value,
)
contact = address = None
for tag in tags:
if tag.tag == "address":
address = tag.value
elif tag.tag == "contact":
contact = tag.value
nrtable = s3db.req_need_response
need_response_id = nrtable.insert(need_id = need_id,
name = need["req_need.name"],
location_id = need["req_need.location_id"],
contact = contact,
address = address,
)
organisation_id = current.auth.user.organisation_id
if organisation_id:
s3db.req_need_response_organisation.insert(need_response_id = need_response_id,
organisation_id = organisation_id,
role = 1,
)
event_id = need["event_event_need.event_id"]
if event_id:
aetable = s3db.event_event_need_response
aetable.insert(need_response_id = need_response_id,
event_id = event_id,
)
value_uncommitted = line.value_uncommitted
if value_uncommitted is None:
# No commitments yet so commit to all
value = line.value
else:
# Only commit to the remainder
value = value_uncommitted
quantity_uncommitted = line.quantity_uncommitted
if quantity_uncommitted is None:
# No commitments yet so commit to all
quantity = line.quantity
else:
# Only commit to the remainder
quantity = quantity_uncommitted
s3db.req_need_response_line.insert(need_response_id = need_response_id,
need_line_id = need_line_id,
coarse_location_id = line.coarse_location_id,
location_id = line.location_id,
sector_id = line.sector_id,
parameter_id = line.parameter_id,
value = value,
item_category_id = line.item_category_id,
item_id = line.item_id,
item_pack_id = line.item_pack_id,
quantity = quantity,
)
# Update Need Line status
req_need_line_status_update(need_line_id)
# Redirect to Update
from gluon import redirect
        redirect(URL(c="req", f="need_response",
args = [need_response_id, "update"],
))
# -------------------------------------------------------------------------
def req_need_line_status_update(need_line_id):
"""
Update the Need Line's fulfilment Status
"""
db = current.db
s3db = current.s3db
# Read the Line details
nltable = s3db.req_need_line
iptable = s3db.supply_item_pack
query = (nltable.id == need_line_id)
left = iptable.on(nltable.item_pack_id == iptable.id)
need_line = db(query).select(nltable.parameter_id,
nltable.value,
nltable.item_id,
nltable.quantity,
iptable.quantity,
left = left,
limitby = (0, 1)
).first()
need_pack_qty = need_line["supply_item_pack.quantity"]
need_line = need_line["req_need_line"]
need_parameter_id = need_line.parameter_id
need_value = need_line.value or 0
need_value_committed = 0
need_value_reached = 0
need_quantity = need_line.quantity
if need_quantity:
need_quantity = need_quantity * need_pack_qty
else:
need_quantity = 0
need_item_id = need_line.item_id
need_quantity_committed = 0
need_quantity_delivered = 0
# Lookup which Status means 'Cancelled'
stable = s3db.project_status
status = db(stable.name == "Cancelled").select(stable.id,
limitby = (0, 1)
).first()
try:
CANCELLED = status.id
except AttributeError:
# Prepop not done? Name changed?
current.log.debug("'Cancelled' Status not found")
CANCELLED = 999999
# Read the details of all Response Lines linked to this Need Line
rltable = s3db.req_need_response_line
iptable = s3db.supply_item_pack
query = (rltable.need_line_id == need_line_id) & \
(rltable.deleted == False)
left = iptable.on(rltable.item_pack_id == iptable.id)
response_lines = db(query).select(rltable.id,
rltable.parameter_id,
rltable.value,
rltable.value_reached,
rltable.item_id,
iptable.quantity,
rltable.quantity,
rltable.quantity_delivered,
rltable.status_id,
left = left,
)
for line in response_lines:
pack_qty = line["supply_item_pack.quantity"]
line = line["req_need_response_line"]
if line.status_id == CANCELLED:
continue
if line.parameter_id == need_parameter_id:
value = line.value
if value:
need_value_committed += value
value_reached = line.value_reached
if value_reached:
need_value_reached += value_reached
if line.item_id == need_item_id:
quantity = line.quantity
if quantity:
need_quantity_committed += quantity * pack_qty
quantity_delivered = line.quantity_delivered
if quantity_delivered:
need_quantity_delivered += quantity_delivered * pack_qty
# Calculate Need values & Update
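        # Status is derived from the totals computed above:
        #   3 = fully delivered/reached, 2 = fully committed,
        #   1 = partially committed, 0 = uncommitted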
value_uncommitted = max(need_value - need_value_committed, 0)
quantity_uncommitted = max(need_quantity - need_quantity_committed, 0)
if (need_quantity_delivered >= need_quantity) and (need_value_reached >= need_value):
status = 3
elif (quantity_uncommitted <= 0) and (value_uncommitted <= 0):
status = 2
elif (need_quantity_committed > 0) or (need_value_committed > 0):
status = 1
else:
status = 0
db(nltable.id == need_line_id).update(value_committed = need_value_committed,
value_uncommitted = value_uncommitted,
value_reached = need_value_reached,
quantity_committed = need_quantity_committed,
quantity_uncommitted = quantity_uncommitted,
quantity_delivered = need_quantity_delivered,
status = status,
)
# -------------------------------------------------------------------------
def req_need_postprocess(form):
"""
Set the Realm
Set the Request Number
"""
need_id = form.vars.id
db = current.db
s3db = current.s3db
# Lookup Organisation
notable = s3db.req_need_organisation
org_link = db(notable.need_id == need_id).select(notable.organisation_id,
limitby = (0, 1),
).first()
if org_link:
organisation_id = org_link.organisation_id
else:
# Create the link (form isn't doing so when readonly!)
user = current.auth.user
if user and user.organisation_id:
organisation_id = user.organisation_id
if organisation_id:
notable.insert(need_id = need_id,
organisation_id = organisation_id)
else:
# Nothing we can do!
return
else:
# Nothing we can do!
return
# Lookup Realm
otable = s3db.org_organisation
org = db(otable.id == organisation_id).select(otable.pe_id,
limitby = (0, 1),
).first()
realm_entity = org.pe_id
# Set Realm
ntable = s3db.req_need
db(ntable.id == need_id).update(realm_entity = realm_entity)
nltable = s3db.req_need_line
db(nltable.need_id == need_id).update(realm_entity = realm_entity)
if form.record:
# Update form
return
# Lookup Request Number format
ottable = s3db.org_organisation_tag
query = (ottable.organisation_id == organisation_id) & \
(ottable.tag == "req_number")
tag = db(query).select(ottable.value,
limitby = (0, 1),
).first()
if not tag:
return
# Lookup most recently-used value
nttable = s3db.req_need_tag
query = (nttable.tag == "req_number") & \
(nttable.need_id != need_id) & \
(nttable.need_id == notable.need_id) & \
(notable.organisation_id == organisation_id)
need = db(query).select(nttable.value,
limitby = (0, 1),
orderby = ~nttable.created_on,
).first()
# Set Request Number
if need:
new_number = int(need.value.split("-", 1)[1]) + 1
req_number = "%s-%s" % (tag.value, str(new_number).zfill(6))
else:
req_number = "%s-000001" % tag.value
nttable.insert(need_id = need_id,
tag = "req_number",
value = req_number,
)
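        # Worked example (illustrative): with an org req_number tag of "MOH",
        # the first request gets "MOH-000001"; if the most recent value were
        # "MOH-000012", the next would be "MOH-000013"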
# -------------------------------------------------------------------------
def customise_req_need_resource(r, tablename):
from gluon import IS_EMPTY_OR, IS_IN_SET
from s3 import s3_comments_widget, \
S3LocationSelector, S3LocationDropdownWidget, \
S3Represent, \
S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink
db = current.db
s3db = current.s3db
table = s3db.req_need
table.name.widget = lambda f, v: \
s3_comments_widget(f, v, _placeholder = "e.g. 400 families require drinking water in Kegalle DS Division in 1-2 days.")
table.comments.comment = None
table.comments.widget = lambda f, v: \
s3_comments_widget(f, v, _placeholder = "e.g. Accessibility issues, additional contacts on the ground (if any), any other relevant information.")
# These levels/labels are for SHARE/LK
table.location_id.widget = S3LocationSelector(hide_lx = False,
levels = ("L1", "L2"),
required_levels = ("L1", "L2"),
show_map = False)
ltable = s3db.req_need_line
f = ltable.coarse_location_id
f.label = T("Division")
# @ToDo: Option for gis_LocationRepresent which doesn't show level/parent, but supports translation
        # NB cannot include the JS in the link, to avoid it being blocked by Chrome's XSS_AUDITOR
location_represent = S3Represent(lookup = "gis_location")
f.represent = location_represent
f.widget = S3LocationDropdownWidget(level="L3", blank=True)
f = ltable.location_id
f.label = T("GN")
f.represent = location_represent
f.widget = S3LocationDropdownWidget(level="L4", blank=True)
# Custom Filtered Components
s3db.add_components(tablename,
req_need_tag = (# Address
{"name": "address",
"joinby": "need_id",
"filterby": {"tag": "address",
},
"multiple": False,
},
# Contact
{"name": "contact",
"joinby": "need_id",
"filterby": {"tag": "contact",
},
"multiple": False,
},
# Issue
{"name": "issue",
"joinby": "need_id",
"filterby": {"tag": "issue",
},
"multiple": False,
},
# Req Number
{"name": "req_number",
"joinby": "need_id",
"filterby": {"tag": "req_number",
},
"multiple": False,
},
# Original Request From
{"name": "request_from",
"joinby": "need_id",
"filterby": {"tag": "request_from",
},
"multiple": False,
},
# Verified
{"name": "verified",
"joinby": "need_id",
"filterby": {"tag": "verified",
},
"multiple": False,
},
)
)
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
address = components_get("address")
f = address.table.value
f.widget = s3_comments_widget
contact = components_get("contact")
f = contact.table.value
f.widget = lambda f, v: \
s3_comments_widget(f, v, _placeholder = "of person on the ground e.g. GA, DS")
issue = components_get("issue")
f = issue.table.value
f.widget = lambda f, v: \
s3_comments_widget(f, v, _placeholder = "e.g. Lack of accessibility and contaminated wells due to heavy rainfall.")
request_from = components_get("request_from")
f = request_from.table.value
f.widget = lambda f, v: \
s3_comments_widget(f, v, _placeholder = "Please indicate the requesting organisation/ministry.")
verified = components_get("verified")
f = verified.table.value
f.requires = IS_EMPTY_OR(IS_IN_SET(("Y", "N")))
f.represent = lambda v: T("yes") if v == "Y" else T("no")
from s3 import S3TagCheckboxWidget
f.widget = S3TagCheckboxWidget(on="Y", off="N")
f.default = "N"
auth = current.auth
user = auth.user
if user and user.organisation_id:
organisation_id = user.organisation_id
else:
organisation_id = None
if auth.s3_has_role("ADMIN") or organisation_id:
f.default = "Y"
else:
f.writable = False
if r.id and r.resource.tablename == tablename:
# Read or Update
create = False
else:
# Create
create = True
if not create:
# Read or Update
if organisation_id:
org_readonly = True
else:
rotable = s3db.req_need_organisation
org_link = db(rotable.need_id == r.id).select(rotable.organisation_id,
limitby = (0, 1)
).first()
if org_link:
org_readonly = True
else:
org_readonly = False
#table = s3db.req_need_item
#table.quantity.label = T("Quantity Requested")
#table.quantity_committed.readable = True
#table.quantity_uncommitted.readable = True
#table.quantity_delivered.readable = True
#need_item = S3SQLInlineComponent("need_item",
# label = T("Items Needed"),
# fields = ["item_category_id",
# "item_id",
# (T("Unit"), "item_pack_id"),
# (T("Needed within Timeframe"), "timeframe"),
# "quantity",
# "quantity_committed",
# "quantity_uncommitted",
# "quantity_delivered",
# #(T("Urgency"), "priority"),
# "comments",
# ],
# )
#table = s3db.req_need_demographic
#table.value.label = T("Number in Need")
#table.value_committed.readable = True
#table.value_uncommitted.readable = True
#table.value_reached.readable = True
#demographic = S3SQLInlineComponent("need_demographic",
# label = T("People Affected"),
# fields = [(T("Type"), "parameter_id"),
# #(T("Needed within Timeframe"), "timeframe"),
# "value",
# "value_committed",
# "value_uncommitted",
# "value_reached",
# "comments",
# ],
# )
#ltable.value.label = T("Number in Need")
ltable.value_committed.readable = True
ltable.value_uncommitted.readable = True
ltable.value_reached.readable = True
#ltable.quantity.label = T("Quantity Requested")
ltable.quantity_committed.readable = True
ltable.quantity_uncommitted.readable = True
ltable.quantity_delivered.readable = True
line = S3SQLInlineComponent("need_line",
label = "",
fields = ["coarse_location_id",
"location_id",
"sector_id",
(T("People affected"), "parameter_id"),
"value",
"value_committed",
(T("Number Outstanding"), "value_uncommitted"),
"value_reached",
(T("Item Category"), "item_category_id"),
"item_id",
(T("Unit"), "item_pack_id"),
(T("Item Quantity"), "quantity"),
(T("Needed within Timeframe"), "timeframe"),
"quantity_committed",
(T("Quantity Outstanding"), "quantity_uncommitted"),
"quantity_delivered",
#"comments",
],
)
else:
# Create
org_readonly = organisation_id is not None
#need_item = S3SQLInlineComponent("need_item",
# label = T("Items Needed"),
# fields = ["item_category_id",
# "item_id",
# (T("Unit"), "item_pack_id"),
# (T("Needed within Timeframe"), "timeframe"),
# "quantity",
# #(T("Urgency"), "priority"),
# "comments",
# ],
# )
#demographic = S3SQLInlineComponent("need_demographic",
# label = T("People Affected"),
# fields = [(T("Type"), "parameter_id"),
# #(T("Needed within Timeframe"), "timeframe"),
# "value",
# "comments",
# ],
# )
line = S3SQLInlineComponent("need_line",
label = "",
fields = ["coarse_location_id",
"location_id",
"sector_id",
(T("People affected"), "parameter_id"),
"value",
(T("Item Category"), "item_category_id"),
"item_id",
(T("Unit"), "item_pack_id"),
"quantity",
(T("Needed within Timeframe"), "timeframe"),
#"comments",
],
)
crud_fields = [S3SQLInlineLink("event",
field = "event_id",
label = T("Disaster"),
multiple = False,
required = True,
),
S3SQLInlineLink("organisation",
field = "organisation_id",
search = False,
label = T("Organization"),
multiple = False,
readonly = org_readonly,
required = not org_readonly,
),
"location_id",
(T("Date entered"), "date"),
#(T("Urgency"), "priority"),
# Moved into Lines
#S3SQLInlineLink("sector",
# field = "sector_id",
# search = False,
# label = T("Sector"),
# multiple = False,
# ),
"name",
(T("Original Request From"), "request_from.value"),
(T("Issue/cause"), "issue.value"),
#demographic,
#need_item,
line,
S3SQLInlineComponent("document",
label = T("Attachment"),
fields = [("", "file")],
# multiple = True has reliability issues in at least Chrome
multiple = False,
),
(T("Verified by government official"), "verified.value"),
(T("Contact details"), "contact.value"),
(T("Address for delivery/affected people"), "address.value"),
"comments",
]
from .controllers import project_ActivityRepresent
natable = s3db.req_need_activity
#f = natable.activity_id
#f.represent = project_ActivityRepresent()
natable.activity_id.represent = project_ActivityRepresent()
if not create:
# Read or Update
req_number = components_get("req_number")
req_number.table.value.writable = False
crud_fields.insert(2, (T("Request Number"), "req_number.value"))
crud_fields.insert(-2, "status")
need_links = db(natable.need_id == r.id).select(natable.activity_id)
if need_links:
# This hides the widget from Update forms instead of just rendering read-only!
#f.writable = False
crud_fields.append(S3SQLInlineLink("activity",
field = "activity_id",
label = T("Commits"),
readonly = True,
))
crud_form = S3SQLCustomForm(*crud_fields,
postprocess = req_need_postprocess)
need_line_summary = URL(c="req", f="need_line", args="summary")
s3db.configure(tablename,
create_next = need_line_summary,
delete_next = need_line_summary,
update_next = need_line_summary,
crud_form = crud_form,
)
settings.customise_req_need_resource = customise_req_need_resource
# -------------------------------------------------------------------------
def req_need_rheader(r):
"""
Resource Header for Needs
"""
if r.representation != "html":
# RHeaders only used in interactive views
return None
record = r.record
if not record:
# RHeaders only used in single-record views
return None
if r.name == "need":
# No Tabs (all done Inline)
tabs = [(T("Basic Details"), None),
#(T("Demographics"), "demographic"),
#(T("Items"), "need_item"),
#(T("Skills"), "need_skill"),
#(T("Tags"), "tag"),
]
from s3 import s3_rheader_tabs
rheader_tabs = s3_rheader_tabs(r, tabs)
location_id = r.table.location_id
from gluon import DIV, TABLE, TR, TH
rheader = DIV(TABLE(TR(TH("%s: " % location_id.label),
location_id.represent(record.location_id),
)),
rheader_tabs)
else:
# Not defined, probably using wrong rheader
rheader = None
return rheader
# -------------------------------------------------------------------------
def customise_req_need_controller(**attr):
line_id = current.request.get_vars.get("line")
if line_id:
from gluon import redirect
nltable = current.s3db.req_need_line
line = current.db(nltable.id == line_id).select(nltable.need_id,
limitby = (0, 1)
).first()
if line:
redirect(URL(args = [line.need_id],
vars = {}))
# Custom commit method to create an Activity Group from a Need
current.s3db.set_method("req", "need",
method = "commit",
action = req_need_commit)
s3 = current.response.s3
# Custom postp
standard_postp = s3.postp
def postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive:
# Inject the javascript to handle dropdown filtering
# - normally injected through AddResourceLink, but this isn't there in Inline widget
# - we also need to turn the trigger & target into dicts
s3.scripts.append("/%s/static/themes/SHARE/js/need.js" % r.application)
if r.id and isinstance(output, dict) and \
current.auth.s3_has_permission("create", "project_activity"):
# Custom Button
from gluon import A
output["commit"] = A(T("Commit"),
_href = URL(args=[r.id, "commit"]),
_class = "action-btn",
#_id = "commit-btn",
)
#s3.jquery_ready.append(
#'''S3.confirmClick('#commit-btn','%s')''' % T("Do you want to commit to this need?"))
return output
s3.postp = postp
attr["rheader"] = req_need_rheader
return attr
settings.customise_req_need_controller = customise_req_need_controller
# -------------------------------------------------------------------------
def homepage_stats_update():
"""
Scheduler task to update the data files for the charts
on the homepage
"""
from .controllers import HomepageStatistics
HomepageStatistics.update_data()
settings.tasks.homepage_stats_update = homepage_stats_update
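# Illustrative sketch (not part of the original template): because the task
# is registered by name in settings.tasks above, it can be run asynchronously
# through the generic "settings_task" wrapper, exactly as
# req_need_line_update_stats below does:
#
#   current.s3task.run_async("settings_task",
#                            args = ["homepage_stats_update"])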
def req_need_line_update_stats(r, **attr):
"""
Method to manually update the data files for the charts
on the homepage; can be run by POSTing an empty request
to req/need_line/update_stats, e.g. via:
<form action='{{=URL(c="req", f="need_line", args=["update_stats"])}}' method='post'>
<button type='submit'>{{=T("Update Stats")}}</button>
</form>
(this could e.g. be added to the page footer for ADMINs)
"""
if r.http == "POST":
if not current.auth.s3_has_role("ADMIN"):
# No, this is not open for everybody
r.unauthorized()
else:
current.s3task.run_async("settings_task",
args = ["homepage_stats_update"])
current.session.confirmation = T("Statistics data update started")
from gluon import redirect
redirect(URL(c="default", f="index"))
else:
r.error("405", current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
def customise_req_need_line_resource(r, tablename):
from gluon import IS_EMPTY_OR, IS_IN_SET, SPAN
from s3 import S3Represent
s3db = current.s3db
current.response.s3.crud_strings["req_need_line"]["title_map"] = T("Map of Needs")
req_status_opts = {0: SPAN(T("Uncommitted"),
_class = "req_status_none",
),
1: SPAN(T("Partially Committed"),
_class = "req_status_partial",
),
2: SPAN(T("Fully Committed"),
_class = "req_status_committed",
),
3: SPAN(T("Complete"),
_class = "req_status_complete",
),
}
table = s3db.req_need_line
f = table.status
f.requires = IS_EMPTY_OR(IS_IN_SET(req_status_opts, zero = None))
f.represent = S3Represent(options = req_status_opts)
f = table.coarse_location_id
f.label = T("Division")
# @ToDo: Option for gis_LocationRepresent which doesn't show level/parent, but supports translation
# NB cannot have the JS in link to avoid being blocked by Chrome XSS_AUDITOR
location_represent = S3Represent(lookup = "gis_location")
f.represent = location_represent
f = table.location_id
# @ToDo: Option for gis_LocationRepresent which doesn't show level/parent, but supports translation
f.represent = location_represent
if r.representation == "plain":
# Settings for Map Popups
f.label = T("GN")
# Custom method to (manually) update homepage statistics
s3db.set_method("req", "need_line",
method = "update_stats",
action = req_need_line_update_stats,
)
settings.customise_req_need_line_resource = customise_req_need_line_resource
# -------------------------------------------------------------------------
def customise_req_need_line_controller(**attr):
from s3 import S3OptionsFilter, S3TextFilter #, S3DateFilter, S3LocationFilter
s3db = current.s3db
settings.base.pdf_orientation = "Landscape"
settings.ui.summary = (# Gets replaced in postp
# @ToDo: better performance by not including here & placing directly into the view instead
{"common": True,
"name": "add",
"widgets": [{"method": "create"}],
},
#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}],
# },
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}],
},
{"name": "charts",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}],
},
#{"name": "map",
# "label": "Map",
# "widgets": [{"method": "map",
# "ajax_init": True}],
# },
)
# Custom Filtered Components
s3db.add_components("req_need",
req_need_tag = (# Req Number
{"name": "req_number",
"joinby": "need_id",
"filterby": {"tag": "req_number",
},
"multiple": False,
},
# Original Request From
{"name": "request_from",
"joinby": "need_id",
"filterby": {"tag": "request_from",
},
"multiple": False,
},
# Verified
{"name": "verified",
"joinby": "need_id",
"filterby": {"tag": "verified",
},
"multiple": False,
},
),
)
s3db.add_components("req_need_response",
req_need_response_organisation = (# Agency
{"name": "agency",
"joinby": "need_response_id",
"filterby": {"role": 1,
},
#"multiple": False,
},
),
)
filter_widgets = [S3TextFilter(["need_id$req_number.value",
"item_id$name",
# These levels are for SHARE/LK
#"location_id$L1",
"location_id$L2",
#"location_id$L3",
#"location_id$L4",
"need_id$name",
"need_id$comments",
],
label = T("Search"),
comment = T("Search for a Need by Request Number, Item, Location, Summary or Comments"),
),
#S3OptionsFilter("need_id$event.event_type_id",
# #hidden = True,
# ),
# @ToDo: Filter this list dynamically based on Event Type (if-used):
S3OptionsFilter("need_id$event__link.event_id"),
#S3LocationFilter("location_id",
# # These levels are for SHARE/LK
# levels = ("L2", "L3", "L4"),
# ),
S3OptionsFilter("need_id$location_id",
label = T("District"),
),
S3OptionsFilter("need_id$organisation__link.organisation_id",
#hidden = True,
),
S3OptionsFilter("sector_id",
#hidden = True,
),
S3OptionsFilter("parameter_id"),
S3OptionsFilter("timeframe"),
S3OptionsFilter("item_id"),
S3OptionsFilter("status",
cols = 3,
table = False,
label = T("Status"),
),
#S3DateFilter("date",
# ),
#S3OptionsFilter("need_id$verified.value",
# cols = 2,
# label = T("Verified"),
# #hidden = True,
# ),
]
s3db.configure("req_need_line",
filter_widgets = filter_widgets,
# We create a custom Create Button to create a Need not a Need Line
listadd = False,
list_fields = [(T("Status"), "status"),
(T("Orgs responding"), "need_response_line.need_response_id$agency.organisation_id"),
"need_id$date",
(T("Need entered by"), "need_id$organisation__link.organisation_id"),
(T("Original Request From"), "need_id$request_from.value"),
# These levels/Labels are for SHARE/LK
#(T("Province"), "need_id$location_id$L1"),
(T("District"), "need_id$location_id$L2"),
#(T("DS"), "location_id$L3"),
#(T("GN"), "location_id$L4"),
"sector_id",
"parameter_id",
"item_id",
"quantity",
(T("Quantity Outstanding"),"quantity_uncommitted"),
"timeframe",
(T("Request Number"), "need_id$req_number.value"),
],
popup_url = URL(c="req", f="need",
vars = {"line": "[id]"}
),
)
# Custom commit method to create an Activity from a Need Line
s3db.set_method("req", "need_line",
method = "commit",
action = req_need_line_commit)
s3 = current.response.s3
s3.crud_strings["req_need_line"] = Storage(
#label_create = T("Add Needs"),
title_list = T("Needs"),
#title_display=T("Needs"),
#title_update=T("Edit Needs"),
#title_upload = T("Import Needs"),
#label_list_button = T("List Needs"),
#label_delete_button=T("Delete Needs"),
msg_record_created=T("Needs added"),
msg_record_modified=T("Needs updated"),
msg_record_deleted=T("Needs deleted"),
msg_list_empty = T("No Needs currently registered"),
)
# Custom postp
standard_postp = s3.postp
def postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and r.method == "summary":
from gluon import A, DIV
from s3 import s3_str#, S3CRUD
auth = current.auth
# Normal Action Buttons
#S3CRUD.action_buttons(r)
# Custom Action Buttons
deletable = current.db(auth.s3_accessible_query("delete", "req_need_line")).select(s3db.req_need_line.id)
restrict_d = [str(row.id) for row in deletable]
s3.actions = [{"label": s3_str(T("Open")),
"_class": "action-btn",
"url": URL(f="need", vars={"line": "[id]"}),
},
{"label": s3_str(T("Delete")),
"_class": "delete-btn",
"url": URL(args=["[id]", "delete"]),
"restrict": restrict_d,
},
]
if auth.s3_has_permission("create", "req_need_response"):
s3.actions.append({"label": s3_str(T("Commit")),
"_class": "action-btn",
"url": URL(args=["[id]", "commit"]),
})
# Custom Create Button
add_btn = DIV(DIV(DIV(A(T("Add Needs"),
_class = "action-btn",
_href = URL(f="need", args="create"),
),
_id = "list-btn-add",
),
_class = "widget-container with-tabs",
),
_class = "section-container",
)
output["common"][0] = add_btn
return output
s3.postp = postp
return attr
settings.customise_req_need_line_controller = customise_req_need_line_controller
# -------------------------------------------------------------------------
def req_need_response_postprocess(form):
"""
Set the Realm
Ensure that the Need Lines (if-any) have the correct Status
"""
db = current.db
s3db = current.s3db
need_response_id = form.vars.id
# Lookup Organisation
nrotable = s3db.req_need_response_organisation
query = (nrotable.need_response_id == need_response_id) & \
(nrotable.role == 1)
org_link = db(query).select(nrotable.organisation_id,
limitby = (0, 1),
).first()
if not org_link:
return
organisation_id = org_link.organisation_id
# Lookup Realm
otable = s3db.org_organisation
org = db(otable.id == organisation_id).select(otable.pe_id,
limitby = (0, 1),
).first()
realm_entity = org.pe_id
# Set Realm
nrtable = s3db.req_need_response
db(nrtable.id == need_response_id).update(realm_entity = realm_entity)
rltable = s3db.req_need_response_line
db(rltable.need_response_id == need_response_id).update(realm_entity = realm_entity)
# Lookup the Need Lines
query = (rltable.need_response_id == need_response_id) & \
(rltable.deleted == False)
response_lines = db(query).select(rltable.need_line_id)
for line in response_lines:
need_line_id = line.need_line_id
if need_line_id:
req_need_line_status_update(need_line_id)
# -------------------------------------------------------------------------
def customise_req_need_response_resource(r, tablename):
from s3 import s3_comments_widget, \
S3LocationDropdownWidget, S3LocationSelector, \
S3Represent, \
S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink
#db = current.db
s3db = current.s3db
table = s3db.req_need_response
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Activities"),
title_list = T("Activities"),
title_display = T("Activities"),
title_update = T("Edit Activities"),
title_upload = T("Import Activities"),
label_list_button = T("List Activities"),
label_delete_button = T("Delete Activities"),
msg_record_created = T("Activities added"),
msg_record_modified = T("Activities updated"),
msg_record_deleted = T("Activities deleted"),
msg_list_empty = T("No Activities currently registered"),
)
# These levels/labels are for SHARE/LK
table.location_id.widget = S3LocationSelector(hide_lx = False,
levels = ("L1", "L2"),
required_levels = ("L1", "L2"),
show_map = False)
ltable = s3db.req_need_response_line
f = ltable.coarse_location_id
f.label = T("Division")
# @ToDo: Option for gis_LocationRepresent which doesn't show level/parent, but supports translation
f.represent = S3Represent(lookup = "gis_location")
f.widget = S3LocationDropdownWidget(level="L3", blank=True)
f = ltable.location_id
f.label = T("GN")
# @ToDo: Option for gis_LocationRepresent which doesn't show level/parent, but supports translation
f.represent = S3Represent(lookup = "gis_location")
f.widget = S3LocationDropdownWidget(level="L4", blank=True)
table.comments.comment = None
table.comments.widget = lambda f, v: \
s3_comments_widget(f, v, _placeholder = "e.g. Items changed/replaced within kits, details on partial commitments to a need, any other relevant information.")
# Custom Filtered Components
s3db.add_components(tablename,
req_need_response_organisation = (# Agency
{"name": "agency",
"joinby": "need_response_id",
"filterby": {"role": 1,
},
"multiple": False,
},
# Partners
{"name": "partner",
"joinby": "need_response_id",
"filterby": {"role": 2,
},
#"multiple": False,
},
# Donors
{"name": "donor",
"joinby": "need_response_id",
"filterby": {"role": 3,
},
#"multiple": False,
},
),
)
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
donor = components_get("donor")
donor.table.organisation_id.default = None
partner = components_get("partner")
partner.table.organisation_id.default = None
crud_fields = [S3SQLInlineLink("event",
field = "event_id",
label = T("Disaster"),
multiple = False,
#required = True,
),
S3SQLInlineComponent("agency",
name = "agency",
label = T("Organization"),
fields = [("", "organisation_id"),],
multiple = False,
required = True,
),
# @ToDo: MultiSelectWidget is nicer UI but S3SQLInlineLink
# requires the link*ed* table as component (not the
# link table as applied here) and linked components
# cannot currently be filtered by link table fields
# (=> should solve the latter rather than the former)
# @ToDo: Fix Create Popups
S3SQLInlineComponent("partner",
name = "partner",
label = T("Implementing Partner"),
fields = [("", "organisation_id"),],
),
S3SQLInlineComponent("donor",
name = "donor",
label = T("Donor"),
fields = [("", "organisation_id"),],
),
"location_id",
(T("Date entered"), "date"),
(T("Summary of Needs/Activities"), "name"),
S3SQLInlineComponent("need_response_line",
label = "",
fields = ["coarse_location_id",
"location_id",
"sector_id",
"modality",
(T("Activity Date Planned"), "date"),
(T("Activity Date Completed"), "end_date"),
(T("Beneficiaries (Type)"), "parameter_id"),
(T("Beneficiaries Planned"), "value"),
(T("Beneficiaries Reached"), "value_reached"),
(T("Item Category"), "item_category_id"),
"item_id",
(T("Unit"), "item_pack_id"),
(T("Quantity Planned"), "quantity"),
(T("Quantity Delivered"), "quantity_delivered"),
(T("Activity Status"), "status_id"),
#"comments",
],
#multiple = False,
),
S3SQLInlineComponent("document",
label = T("Attachment"),
fields = [("", "file")],
# multiple = True has reliability issues in at least Chrome
multiple = False,
),
"contact",
"address",
"comments",
]
if r.id and r.resource.tablename == tablename and r.record.need_id:
from .controllers import req_NeedRepresent
f = table.need_id
f.represent = req_NeedRepresent()
f.writable = False
crud_fields.insert(7, "need_id")
# Post-process to update need status for response line changes
crud_form = S3SQLCustomForm(*crud_fields,
postprocess = req_need_response_postprocess)
# Make sure need status gets also updated when response lines are deleted
s3db.configure("req_need_response_line",
ondelete = req_need_response_line_ondelete,
)
need_response_line_summary = URL(c="req", f="need_response_line", args="summary")
s3db.configure(tablename,
crud_form = crud_form,
create_next = need_response_line_summary,
delete_next = need_response_line_summary,
update_next = need_response_line_summary,
)
settings.customise_req_need_response_resource = customise_req_need_response_resource
# -------------------------------------------------------------------------
def customise_req_need_response_controller(**attr):
line_id = current.request.get_vars.get("line")
if line_id:
from gluon import redirect
nltable = current.s3db.req_need_response_line
line = current.db(nltable.id == line_id).select(nltable.need_response_id,
limitby = (0, 1)
).first()
if line:
redirect(URL(args = [line.need_response_id],
vars = {}))
s3 = current.response.s3
# Custom postp
standard_postp = s3.postp
def postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive:
# Inject the javascript to handle dropdown filtering
# - normally injected through AddResourceLink, but this isn't there in Inline widget
# - we also need to turn the trigger & target into dicts
s3.scripts.append("/%s/static/themes/SHARE/js/need_response.js" % r.application)
return output
s3.postp = postp
return attr
settings.customise_req_need_response_controller = customise_req_need_response_controller
# -------------------------------------------------------------------------
def req_need_response_line_ondelete(row):
"""
Ensure that the Need Line (if-any) has the correct Status
"""
import json
db = current.db
s3db = current.s3db
response_line_id = row.get("id")
# Lookup the Need Line
rltable = s3db.req_need_response_line
record = db(rltable.id == response_line_id).select(rltable.deleted_fk,
limitby = (0, 1)
).first()
if not record:
return
deleted_fk = json.loads(record.deleted_fk)
need_line_id = deleted_fk.get("need_line_id")
if not need_line_id:
return
# Check that the Need Line hasn't been deleted
nltable = s3db.req_need_line
need_line = db(nltable.id == need_line_id).select(nltable.deleted,
limitby = (0, 1)
).first()
if need_line and not need_line.deleted:
req_need_line_status_update(need_line_id)
# -------------------------------------------------------------------------
def customise_req_need_response_line_resource(r, tablename):
from s3 import S3Represent
s3db = current.s3db
table = s3db.req_need_response_line
#current.response.s3.crud_strings["req_need_response_line"] = Storage(title_map = T("Map of Activities"),)
# Settings for Map Popups
f = table.coarse_location_id
f.label = T("Division")
# @ToDo: Option for gis_LocationRepresent which doesn't show level/parent, but supports translation
f.represent = S3Represent(lookup = "gis_location")
f = table.location_id
f.label = T("GN")
# @ToDo: Option for gis_LocationRepresent which doesn't show level/parent, but supports translation
f.represent = S3Represent(lookup = "gis_location")
s3db.configure(tablename,
ondelete = req_need_response_line_ondelete,
popup_url = URL(c="req", f="need_response",
vars = {"line": "[id]"}
),
report_represent = NeedResponseLineReportRepresent,
)
settings.customise_req_need_response_line_resource = customise_req_need_response_line_resource
# -------------------------------------------------------------------------
def customise_req_need_response_line_controller(**attr):
from s3 import S3OptionsFilter #, S3DateFilter, S3LocationFilter, S3TextFilter
s3db = current.s3db
table = s3db.req_need_response_line
settings.base.pdf_orientation = "Landscape"
settings.ui.summary = (# Gets replaced in postp
# @ToDo: better performance by not including here & placing directly into the view instead
{"common": True,
"name": "add",
"widgets": [{"method": "create"}],
},
#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}],
# },
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}],
},
{"name": "charts",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}],
},
#{"name": "map",
# "label": "Map",
# "widgets": [{"method": "map",
# "ajax_init": True}],
# },
)
# Custom Filtered Components
s3db.add_components("req_need_response",
req_need_response_organisation = (# Agency
{"name": "agency",
"joinby": "need_response_id",
"filterby": {"role": 1,
},
#"multiple": False,
},
# Partners
{"name": "partner",
"joinby": "need_response_id",
"filterby": {"role": 2,
},
#"multiple": False,
},
# Donors
{"name": "donor",
"joinby": "need_response_id",
"filterby": {"role": 3,
},
#"multiple": False,
},
),
)
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
filter_widgets = [S3OptionsFilter("need_response_id$agency.organisation_id",
label = T("Organization"),
),
#S3OptionsFilter("need_response_id$event.event_type_id",
# #hidden = True,
# ),
# @ToDo: Filter this list dynamically based on Event Type (if-used):
S3OptionsFilter("need_response_id$event__link.event_id",
#hidden = True,
),
S3OptionsFilter("sector_id"),
#S3LocationFilter("location_id",
# label = T("Location"),
# # These levels are for SHARE/LK
# levels = ("L2", "L3", "L4"),
# ),
S3OptionsFilter("need_response_id$location_id",
label = T("District"),
),
S3OptionsFilter("need_response_id$donor.organisation_id",
label = T("Donor"),
),
S3OptionsFilter("need_response_id$partner.organisation_id",
label = T("Partner"),
),
S3OptionsFilter("parameter_id"),
S3OptionsFilter("item_id"),
#S3OptionsFilter("modality"),
#S3DateFilter("date"),
S3OptionsFilter("status_id",
cols = 4,
label = T("Status"),
#hidden = True,
),
]
list_fields = [(T("Organization"), "need_response_id$agency.organisation_id"),
(T("Implementing Partner"), "need_response_id$partner.organisation_id"),
(T("Donor"), "need_response_id$donor.organisation_id"),
# These levels/labels are for SHARE/LK
#(T("Province"), "need_response_id$location_id$L1"),
(T("District"), "need_response_id$location_id$L2"),
"coarse_location_id",
"location_id",
(T("Sector"), "sector_id"),
(T("Item"), "item_id"),
(T("Items Planned"), "quantity"),
#(T("Items Delivered"), "quantity_delivered"),
(T("Modality"), "modality"),
(T("Beneficiaries Planned"), "value"),
(T("Beneficiaries Reached"), "value_reached"),
(T("Activity Date (Planned"), "date"),
(T("Activity Status"), "status_id"),
]
if r.interactive:
s3.crud_strings["req_need_response_line"] = Storage(
#label_create = T("Add Activity"),
title_list = T("Activities"),
#title_display = T("Activity"),
#title_update = T("Edit Activity"),
#title_upload = T("Import Activities"),
#label_list_button = T("List Activities"),
#label_delete_button = T("Delete Activity"),
#msg_record_created = T("Activity added"),
#msg_record_modified = T("Activity updated"),
msg_record_deleted = T("Activity deleted"),
msg_list_empty = T("No Activities currently registered"),
)
#if r.method == "report":
# # In report drilldown, include the (Location) after quantity_delivered
# # => Needs to be a VF as we can't read the record from within represents
# #table.quantity_delivered.represent =
#
# from s3 import S3Represent, s3_fieldmethod
#
# # @ToDo: Option for gis_LocationRepresent which doesn't show level/parent, but supports translation
# gis_represent = S3Represent(lookup = "gis_location")
#
# def quantity_delivered_w_location(row):
# quantity_delivered = row["req_need_response_line.quantity_delivered"]
# location_id = row["req_need_response_line.location_id"]
# if not location_id:
# location_id = row["req_need_response_line.coarse_location_id"]
# if not location_id:
# location_id = row["req_need_response.location_id"]
# location = gis_represent(location_id)
# return "%s (%s)" % (quantity_delivered, location)
#
# table.quantity_delivered_w_location = s3_fieldmethod("quantity_delivered_w_location",
# quantity_delivered_w_location,
# # over-ride the default represent of s3_unicode to prevent HTML being rendered too early
# #represent = lambda v: v,
# )
# list_fields.insert(9, (T("Items Delivered"), "quantity_delivered_w_location"))
#else:
list_fields.insert(9, (T("Items Delivered"), "quantity_delivered"))
# Exclude the Disaster column from PDF exports
if r.representation != "pdf":
list_fields.insert(0, (T("Disaster"), "need_response_id$event__link.event_id"))
s3db.configure("req_need_response_line",
filter_widgets = filter_widgets,
# We create a custom Create Button to create a Need Response not a Need Response Line
listadd = False,
list_fields = list_fields,
)
return result
s3.prep = prep
# Custom postp
standard_postp = s3.postp
def postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and r.method == "summary":
from gluon import A, DIV
from s3 import s3_str
#from s3 import S3CRUD, s3_str
# Normal Action Buttons
#S3CRUD.action_buttons(r)
# Custom Action Buttons
auth = current.auth
deletable = current.db(auth.s3_accessible_query("delete", "req_need_response_line")).select(table.id)
restrict_d = [str(row.id) for row in deletable]
s3.actions = [{"label": s3_str(T("Open")),
"_class": "action-btn",
"url": URL(f="need_response", vars={"line": "[id]"}),
},
{"label": s3_str(T("Delete")),
"_class": "delete-btn",
"url": URL(args=["[id]", "delete"]),
"restrict": restrict_d,
},
]
# Custom Create Button
add_btn = DIV(DIV(DIV(A(T("Add Activity"),
_class = "action-btn",
_href = URL(f="need_response", args="create"),
),
_id = "list-btn-add",
),
_class = "widget-container with-tabs",
),
_class = "section-container",
)
output["common"][0] = add_btn
return output
s3.postp = postp
return attr
settings.customise_req_need_response_line_controller = customise_req_need_response_line_controller
# =============================================================================
class NeedResponseLineReportRepresent(S3ReportRepresent):
"""
Custom representation of need response line records in
pivot table reports:
- show as location name
"""
def __call__(self, record_ids):
"""
Represent record_ids (custom)
@param record_ids: req_need_response_line record IDs
@returns: a JSON-serializable dict {recordID: representation}
"""
# Represent the location IDs
resource = current.s3db.resource("req_need_response_line",
id = record_ids,
)
rows = resource.select(["id", "coarse_location_id", "location_id"],
represent = True,
raw_data = True,
limit = None,
).rows
output = {}
for row in rows:
raw = row["_row"]
if raw["req_need_response_line.location_id"]:
repr_str = row["req_need_response_line.location_id"]
else:
# Fall back to coarse_location_id if no GN available
repr_str = row["req_need_response_line.coarse_location_id"]
output[raw["req_need_response_line.id"]] = repr_str
return output
# END =========================================================================
| mit |
josherick/bokeh | bokeh/resources.py | 26 | 9535 | ''' The resources module provides the Resources class for easily configuring
how BokehJS code and CSS resources should be located, loaded, and embedded in
Bokeh documents.
Also provides some pre-configured Resources objects:
Attributes:
CDN : load minified BokehJS from CDN
INLINE : provide minified BokehJS from library static directory
'''
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
from os.path import join, relpath, splitext
import re
import six
from . import __version__
from .settings import settings
from .util.paths import bokehjsdir
_DEV_PAT = re.compile(r"^(\d)+\.(\d)+\.(\d)+(dev|rc)")
def _cdn_base_url():
return "http://cdn.pydata.org"
def _get_cdn_urls(version=None, minified=True):
if version is None:
if settings.docs_cdn():
version = settings.docs_cdn()
else:
version = __version__.split('-')[0]
# check if we want minified js and css
_min = ".min" if minified else ""
base_url = _cdn_base_url()
dev_container = 'bokeh/dev'
rel_container = 'bokeh/release'
# check the 'dev' fingerprint
container = dev_container if _DEV_PAT.match(version) else rel_container
if version.endswith(('dev', 'rc')):
logger.debug("Getting CDN URL for local dev version will not produce usable URL")
result = {
'js_files' : ['%s/%s/bokeh-%s%s.js' % (base_url, container, version, _min)],
'css_files' : ['%s/%s/bokeh-%s%s.css' % (base_url, container, version, _min)],
'messages' : [],
}
if len(__version__.split('-')) > 1:
result['messages'].append({
"type" : "warn",
"text" : ("Requesting CDN BokehJS version '%s' from Bokeh development version '%s'. "
"This configuration is unsupported and may not work!" % (version, __version__))
})
return result
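# For orientation (derived from the logic above; values illustrative): with
# version="0.9.0" and minified=True this returns
#   {'js_files': ['http://cdn.pydata.org/bokeh/release/bokeh-0.9.0.min.js'],
#    'css_files': ['http://cdn.pydata.org/bokeh/release/bokeh-0.9.0.min.css'],
#    'messages': []}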
def _get_server_urls(root_url, minified=True):
_min = ".min" if minified else ""
result = {
'js_files' : ['%sbokehjs/static/js/bokeh%s.js' % (root_url, _min)],
'css_files' : ['%sbokehjs/static/css/bokeh%s.css' % (root_url, _min)],
'messages' : [],
}
return result
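# Similarly (illustrative): with root_url="http://127.0.0.1:5006/" and
# minified=True this returns
#   {'js_files': ['http://127.0.0.1:5006/bokehjs/static/js/bokeh.min.js'],
#    'css_files': ['http://127.0.0.1:5006/bokehjs/static/css/bokeh.min.css'],
#    'messages': []}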
def _inline(paths):
strings = []
for path in paths:
begin = "/* BEGIN %s */" % path
middle = open(path, 'rb').read().decode("utf-8")
end = "/* END %s */" % path
strings.append(begin + '\n' + middle + '\n' + end)
return strings
class Resources(object):
''' The Resources class encapsulates information relating to loading or
embedding BokehJS code and CSS.
Args:
mode (str) : how should BokehJS be included in output
See below for descriptions of available modes
version (str, optional) : what version of BokehJS to load
Only valid with the ``'cdn'`` mode
root_dir (str, optional) : root directory for loading BokehJS resources
Only valid with ``'relative'`` and ``'relative-dev'`` modes
minified (bool, optional) : whether JavaScript and CSS should be minified or not (default: True)
root_url (str, optional) : URL and port of Bokeh Server to load resources from
Only valid with ``'server'`` and ``'server-dev'`` modes
The following **mode** values are available for configuring a Resource object:
* ``'inline'`` configure to provide entire BokehJS code and CSS inline
* ``'cdn'`` configure to load BokehJS code and CSS from ``http://cdn.pydata.org``
* ``'server'`` configure to load from a Bokeh Server
* ``'server-dev'`` same as ``server`` but supports non-minified JS
* ``'relative'`` configure to load relative to the given directory
* ``'relative-dev'`` same as ``relative`` but supports non-minified JS
* ``'absolute'`` configure to load from the installed Bokeh library static directory
* ``'absolute-dev'`` same as ``absolute`` but supports non-minified JS
Once configured, a Resource object exposes the following public attributes:
Attributes:
logo_url : location of the BokehJS logo image
js_raw : any raw JS that needs to be placed inside ``<script>`` tags
css_raw : any raw CSS that needs to be placed inside ``<style>`` tags
js_files : URLs of any JS files that need to be loaded by ``<script>`` tags
css_files : URLs of any CSS files that need to be loaded by ``<link>`` tags
messages : any informational messages concerning this configuration
These attributes are often useful as template parameters when embedding
Bokeh plots.
'''
_default_js_files = ["js/bokeh.js"]
_default_css_files = ["css/bokeh.css"]
_default_js_files_dev = ['js/bokeh.js']
_default_css_files_dev = ['css/bokeh.css']
_default_root_dir = "."
_default_root_url = "http://127.0.0.1:5006/"
logo_url = "http://bokeh.pydata.org/_static/bokeh-transparent.png"
def __init__(self, mode='inline', version=None, root_dir=None,
minified=True, log_level="info", root_url=None):
self.mode = settings.resources(mode)
self.root_dir = settings.rootdir(root_dir)
self.version = settings.version(version)
self.minified = settings.minified(minified)
self.log_level = settings.log_level(log_level)
if root_url and not root_url.endswith("/"):
logger.warning("root_url should end with a /, adding one")
root_url = root_url + "/"
self._root_url = root_url
if mode not in ['inline', 'cdn', 'server', 'server-dev', 'relative', 'relative-dev', 'absolute', 'absolute-dev']:
raise ValueError("wrong value for 'mode' parameter, expected "
"'inline', 'cdn', 'server(-dev)', 'relative(-dev)' or 'absolute(-dev)', got %r" % self.mode)
if self.root_dir and not mode.startswith("relative"):
raise ValueError("setting 'root_dir' makes sense only when 'mode' is set to 'relative'")
if self.version and not mode.startswith('cdn'):
raise ValueError("setting 'version' makes sense only when 'mode' is set to 'cdn'")
if root_url and not mode.startswith('server'):
raise ValueError("setting 'root_url' makes sense only when 'mode' is set to 'server'")
self.dev = self.mode.endswith('-dev')
if self.dev:
self.mode = self.mode[:-4]
js_paths = self._js_paths(dev=self.dev, minified=self.minified)
css_paths = self._css_paths(dev=self.dev, minified=self.minified)
base_url = join(bokehjsdir(self.dev), "js")
self._js_raw = []
self._css_raw = []
self.js_files = []
self.css_files = []
self.messages = []
if self.mode == "inline":
self._js_raw = lambda: _inline(js_paths)
self._css_raw = lambda: _inline(css_paths)
elif self.mode == "relative":
root_dir = self.root_dir or self._default_root_dir
self.js_files = [ relpath(p, root_dir) for p in js_paths ]
self.css_files = [ relpath(p, root_dir) for p in css_paths ]
base_url = relpath(base_url, root_dir)
elif self.mode == "absolute":
self.js_files = list(js_paths)
self.css_files = list(css_paths)
elif self.mode == "cdn":
cdn = _get_cdn_urls(self.version, self.minified)
self.js_files = list(cdn['js_files'])
self.css_files = list(cdn['css_files'])
self.messages.extend(cdn['messages'])
elif self.mode == "server":
server = _get_server_urls(self.root_url, self.minified)
self.js_files = list(server['js_files'])
self.css_files = list(server['css_files'])
self.messages.extend(server['messages'])
@property
def log_level(self):
return self._log_level
@log_level.setter
def log_level(self, level):
valid_levels = [
"trace", "debug", "info", "warn", "error", "fatal"
]
if level not in valid_levels:
raise ValueError("Unknown log level '%s', valid levels are: %s", str(valid_levels))
self._log_level = level
@property
def js_raw(self):
if six.callable(self._js_raw):
self._js_raw = self._js_raw()
return self._js_raw + ['Bokeh.set_log_level("%s");' % self.log_level]
@property
def css_raw(self):
if six.callable(self._css_raw):
self._css_raw = self._css_raw()
return self._css_raw
@property
def root_url(self):
if self._root_url:
return self._root_url
else:
return self._default_root_url
def _file_paths(self, files, minified):
if minified:
files = [ root + ".min" + ext for (root, ext) in map(splitext, files) ]
return [ join(bokehjsdir(self.dev), file) for file in files ]
def _js_paths(self, minified=True, dev=False):
files = self._default_js_files_dev if self.dev else self._default_js_files
return self._file_paths(files, False if dev else minified)
def _css_paths(self, minified=True, dev=False):
files = self._default_css_files_dev if self.dev else self._default_css_files
return self._file_paths(files, False if dev else minified)
def _autoload_path(self, elementid):
return self.root_url + "bokeh/autoload.js/%s" % elementid
CDN = Resources(mode="cdn")
INLINE = Resources(mode="inline")
| bsd-3-clause |
zenodo/invenio | invenio/modules/submit/models.py | 5 | 17359 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
websubmit database models.
"""
# General imports.
from invenio.ext.sqlalchemy import db
# Create your models here.
class SbmACTION(db.Model):
"""Represents a SbmACTION record."""
__tablename__ = 'sbmACTION'
lactname = db.Column(db.Text, nullable=True)
sactname = db.Column(db.Char(3), nullable=False, server_default='',
primary_key=True)
dir = db.Column(db.Text, nullable=True)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
actionbutton = db.Column(db.Text, nullable=True)
statustext = db.Column(db.Text, nullable=True)
class SbmALLFUNCDESCR(db.Model):
"""Represents a SbmALLFUNCDESCR record."""
__tablename__ = 'sbmALLFUNCDESCR'
#FIX ME pk
function = db.Column(db.String(40), nullable=False, server_default='',
primary_key=True)
description = db.Column(db.TinyText, nullable=True)
class SbmAPPROVAL(db.Model):
"""Represents a SbmAPPROVAL record."""
__tablename__ = 'sbmAPPROVAL'
doctype = db.Column(db.String(10), nullable=False,
server_default='')
categ = db.Column(db.String(50), nullable=False,
server_default='')
rn = db.Column(db.String(50), nullable=False, server_default='',
primary_key=True)
status = db.Column(db.String(10), nullable=False,
server_default='')
dFirstReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dLastReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dAction = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
access = db.Column(db.String(20), nullable=False,
server_default='0')
note = db.Column(db.Text, nullable=False)
class SbmCATEGORIES(db.Model):
"""Represents a SbmCATEGORIES record."""
__tablename__ = 'sbmCATEGORIES'
doctype = db.Column(db.String(10), nullable=False, server_default='',
primary_key=True, index=True)
sname = db.Column(db.String(75), nullable=False, server_default='',
primary_key=True, index=True)
lname = db.Column(db.String(75), nullable=False,
server_default='')
score = db.Column(db.TinyInteger(3, unsigned=True), nullable=False,
server_default='0')
class SbmCHECKS(db.Model):
"""Represents a SbmCHECKS record."""
__tablename__ = 'sbmCHECKS'
chname = db.Column(db.String(15), nullable=False, server_default='',
primary_key=True)
chdesc = db.Column(db.Text, nullable=True)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
chefi1 = db.Column(db.Text, nullable=True)
chefi2 = db.Column(db.Text, nullable=True)
class SbmCOLLECTION(db.Model):
"""Represents a SbmCOLLECTION record."""
__tablename__ = 'sbmCOLLECTION'
id = db.Column(db.Integer(11), nullable=False,
primary_key=True,
autoincrement=True)
name = db.Column(db.String(100), nullable=False,
server_default='')
class SbmCOLLECTIONSbmCOLLECTION(db.Model):
"""Represents a SbmCOLLECTIONSbmCOLLECTION record."""
__tablename__ = 'sbmCOLLECTION_sbmCOLLECTION'
id_father = db.Column(db.Integer(11), db.ForeignKey(SbmCOLLECTION.id),
nullable=False, server_default='0', primary_key=True)
id_son = db.Column(db.Integer(11), db.ForeignKey(SbmCOLLECTION.id),
nullable=False, server_default='0', primary_key=True)
catalogue_order = db.Column(db.Integer(11), nullable=False,
server_default='0')
class SbmDOCTYPE(db.Model):
"""Represents a SbmDOCTYPE record."""
__tablename__ = 'sbmDOCTYPE'
ldocname = db.Column(db.Text, nullable=True)
sdocname = db.Column(db.String(10), nullable=True,
primary_key=True)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
description = db.Column(db.Text, nullable=True)
class SbmCOLLECTIONSbmDOCTYPE(db.Model):
"""Represents a SbmCOLLECTIONSbmDOCTYPE record."""
__tablename__ = 'sbmCOLLECTION_sbmDOCTYPE'
id_father = db.Column(db.Integer(11), db.ForeignKey(SbmCOLLECTION.id),
nullable=False, server_default='0', primary_key=True)
id_son = db.Column(db.Char(10), db.ForeignKey(SbmDOCTYPE.sdocname),
nullable=False, server_default='0', primary_key=True)
catalogue_order = db.Column(db.Integer(11), nullable=False,
server_default='0')
class SbmCOOKIES(db.Model):
"""Represents a SbmCOOKIES record."""
__tablename__ = 'sbmCOOKIES'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True, autoincrement=True)
name = db.Column(db.String(100), nullable=False)
value = db.Column(db.Text, nullable=True)
uid = db.Column(db.Integer(15), nullable=False)
class SbmCPLXAPPROVAL(db.Model):
"""Represents a SbmCPLXAPPROVAL record."""
__tablename__ = 'sbmCPLXAPPROVAL'
doctype = db.Column(db.String(10), nullable=False,
server_default='')
categ = db.Column(db.String(50), nullable=False,
server_default='')
rn = db.Column(db.String(50), nullable=False, server_default='',
primary_key=True)
type = db.Column(db.String(10), nullable=False,
primary_key=True)
status = db.Column(db.String(10), nullable=False)
id_group = db.Column(db.Integer(15, unsigned=True), nullable=False,
server_default='0')
id_bskBASKET = db.Column(db.Integer(15, unsigned=True), nullable=False,
server_default='0')
id_EdBoardGroup = db.Column(db.Integer(15, unsigned=True), nullable=False,
server_default='0')
dFirstReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dLastReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dEdBoardSel = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dRefereeSel = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dRefereeRecom = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dEdBoardRecom = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dPubComRecom = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dProjectLeaderAction = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
class SbmFIELD(db.Model):
"""Represents a SbmFIELD record."""
__tablename__ = 'sbmFIELD'
subname = db.Column(db.String(13), nullable=True,
primary_key=True)
pagenb = db.Column(db.Integer(11), nullable=True,
primary_key=True, autoincrement=False)
fieldnb = db.Column(db.Integer(11), nullable=True)
fidesc = db.Column(db.String(15), nullable=True,
primary_key=True)
fitext = db.Column(db.Text, nullable=True)
level = db.Column(db.Char(1), nullable=True)
sdesc = db.Column(db.Text, nullable=True)
checkn = db.Column(db.Text, nullable=True)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
fiefi1 = db.Column(db.Text, nullable=True)
fiefi2 = db.Column(db.Text, nullable=True)
class SbmFIELDDESC(db.Model):
"""Represents a SbmFIELDDESC record."""
__tablename__ = 'sbmFIELDDESC'
name = db.Column(db.String(15), #db.ForeignKey(SbmFIELD.fidesc),
nullable=False, server_default='', primary_key=True)
alephcode = db.Column(db.String(50), nullable=True)
marccode = db.Column(db.String(50), nullable=False, server_default='')
type = db.Column(db.Char(1), nullable=True)
size = db.Column(db.Integer(11), nullable=True)
rows = db.Column(db.Integer(11), nullable=True)
cols = db.Column(db.Integer(11), nullable=True)
maxlength = db.Column(db.Integer(11), nullable=True)
val = db.Column(db.Text, nullable=True)
fidesc = db.Column(db.Text, nullable=True)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
modifytext = db.Column(db.Text, nullable=True)
fddfi2 = db.Column(db.Text, nullable=True)
cookie = db.Column(db.Integer(11), nullable=True,
server_default='0')
#field = db.relationship(SbmFIELD, backref='fielddescs')
class SbmFORMATEXTENSION(db.Model):
"""Represents a SbmFORMATEXTENSION record."""
__tablename__ = 'sbmFORMATEXTENSION'
FILE_FORMAT = db.Column(db.Text(50), nullable=False,
primary_key=True)
FILE_EXTENSION = db.Column(db.Text(10), nullable=False,
primary_key=True)
class SbmFUNCTIONS(db.Model):
"""Represents a SbmFUNCTIONS record."""
__tablename__ = 'sbmFUNCTIONS'
action = db.Column(db.String(10), nullable=False,
server_default='', primary_key=True)
doctype = db.Column(db.String(10), nullable=False,
server_default='', primary_key=True)
function = db.Column(db.String(40), nullable=False,
server_default='', primary_key=True)
score = db.Column(db.Integer(11), nullable=False,
server_default='0', primary_key=True)
step = db.Column(db.TinyInteger(4), nullable=False,
server_default='1', primary_key=True)
class SbmFUNDESC(db.Model):
"""Represents a SbmFUNDESC record."""
__tablename__ = 'sbmFUNDESC'
function = db.Column(db.String(40), nullable=False,
server_default='', primary_key=True)
param = db.Column(db.String(40), primary_key=True)
class SbmGFILERESULT(db.Model):
"""Represents a SbmGFILERESULT record."""
__tablename__ = 'sbmGFILERESULT'
FORMAT = db.Column(db.Text(50), nullable=False,
primary_key=True)
RESULT = db.Column(db.Text(50), nullable=False,
primary_key=True)
class SbmIMPLEMENT(db.Model):
"""Represents a SbmIMPLEMENT record."""
__tablename__ = 'sbmIMPLEMENT'
docname = db.Column(db.String(10), nullable=True)
actname = db.Column(db.Char(3), nullable=True)
displayed = db.Column(db.Char(1), nullable=True)
subname = db.Column(db.String(13), nullable=True, primary_key=True)
nbpg = db.Column(db.Integer(11), nullable=True, primary_key=True,
autoincrement=False)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
buttonorder = db.Column(db.Integer(11), nullable=True)
statustext = db.Column(db.Text, nullable=True)
level = db.Column(db.Char(1), nullable=False, server_default='')
score = db.Column(db.Integer(11), nullable=False, server_default='0')
stpage = db.Column(db.Integer(11), nullable=False, server_default='0')
endtxt = db.Column(db.String(100), nullable=False, server_default='')
class SbmPARAMETERS(db.Model):
"""Represents a SbmPARAMETERS record."""
__tablename__ = 'sbmPARAMETERS'
doctype = db.Column(db.String(10), nullable=False,
server_default='', primary_key=True)
name = db.Column(db.String(40), nullable=False,
server_default='', primary_key=True)
value = db.Column(db.Text, nullable=False)
class SbmPUBLICATION(db.Model):
"""Represents a SbmPUBLICATION record."""
__tablename__ = 'sbmPUBLICATION'
doctype = db.Column(db.String(10), nullable=False,
server_default='', primary_key=True)
categ = db.Column(db.String(50), nullable=False,
server_default='', primary_key=True)
rn = db.Column(db.String(50), nullable=False, server_default='',
primary_key=True)
status = db.Column(db.String(10), nullable=False, server_default='')
dFirstReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dLastReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dAction = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
accessref = db.Column(db.String(20), nullable=False, server_default='')
accessedi = db.Column(db.String(20), nullable=False, server_default='')
access = db.Column(db.String(20), nullable=False, server_default='')
referees = db.Column(db.String(50), nullable=False, server_default='')
authoremail = db.Column(db.String(50), nullable=False,
server_default='')
dRefSelection = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dRefRec = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dEdiRec = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
accessspo = db.Column(db.String(20), nullable=False, server_default='')
journal = db.Column(db.String(100), nullable=True)
class SbmPUBLICATIONCOMM(db.Model):
"""Represents a SbmPUBLICATIONCOMM record."""
__tablename__ = 'sbmPUBLICATIONCOMM'
id = db.Column(db.Integer(11), nullable=False,
primary_key=True, autoincrement=True)
id_parent = db.Column(db.Integer(11), server_default='0', nullable=True)
rn = db.Column(db.String(100), nullable=False, server_default='')
firstname = db.Column(db.String(100), nullable=True)
secondname = db.Column(db.String(100), nullable=True)
email = db.Column(db.String(100), nullable=True)
date = db.Column(db.String(40), nullable=False, server_default='')
synopsis = db.Column(db.String(255), nullable=False, server_default='')
commentfulltext = db.Column(db.Text, nullable=True)
class SbmPUBLICATIONDATA(db.Model):
"""Represents a SbmPUBLICATIONDATA record."""
__tablename__ = 'sbmPUBLICATIONDATA'
doctype = db.Column(db.String(10), nullable=False,
server_default='', primary_key=True)
editoboard = db.Column(db.String(250), nullable=False, server_default='')
base = db.Column(db.String(10), nullable=False, server_default='')
logicalbase = db.Column(db.String(10), nullable=False, server_default='')
spokesperson = db.Column(db.String(50), nullable=False, server_default='')
class SbmREFEREES(db.Model):
"""Represents a SbmREFEREES record."""
__tablename__ = 'sbmREFEREES'
doctype = db.Column(db.String(10), nullable=False, server_default='')
categ = db.Column(db.String(10), nullable=False, server_default='')
name = db.Column(db.String(50), nullable=False, server_default='')
address = db.Column(db.String(50), nullable=False, server_default='')
rid = db.Column(db.Integer(11), nullable=False, primary_key=True,
autoincrement=True)
class SbmSUBMISSIONS(db.Model):
"""Represents a SbmSUBMISSIONS record."""
__tablename__ = 'sbmSUBMISSIONS'
email = db.Column(db.String(50), nullable=False,
server_default='')
doctype = db.Column(db.String(10), nullable=False,
server_default='')
action = db.Column(db.String(10), nullable=False,
server_default='')
status = db.Column(db.String(10), nullable=False,
server_default='')
id = db.Column(db.String(30), nullable=False,
server_default='')
reference = db.Column(db.String(40), nullable=False,
server_default='')
cd = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
md = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
log_id = db.Column(db.Integer(11), nullable=False,
primary_key=True,
autoincrement=True)
__all__ = ['SbmACTION',
'SbmALLFUNCDESCR',
'SbmAPPROVAL',
'SbmCATEGORIES',
'SbmCHECKS',
'SbmCOLLECTION',
'SbmCOLLECTIONSbmCOLLECTION',
'SbmDOCTYPE',
'SbmCOLLECTIONSbmDOCTYPE',
'SbmCOOKIES',
'SbmCPLXAPPROVAL',
'SbmFIELD',
'SbmFIELDDESC',
'SbmFORMATEXTENSION',
'SbmFUNCTIONS',
'SbmFUNDESC',
'SbmGFILERESULT',
'SbmIMPLEMENT',
'SbmPARAMETERS',
'SbmPUBLICATION',
'SbmPUBLICATIONCOMM',
'SbmPUBLICATIONDATA',
'SbmREFEREES',
'SbmSUBMISSIONS']
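# A minimal query sketch (illustrative; assumes a configured Invenio/Flask
# application context, where db is Flask-SQLAlchemy based and the models
# therefore expose .query; the status value is an example, not taken from
# this module):
#
#   pending = SbmAPPROVAL.query.filter_by(status='waiting').all()
#   doctype = SbmDOCTYPE.query.get('TEST')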
| gpl-2.0 |