repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
radez/python-heatclient | setup.py | 1 | 1880 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
from heatclient.openstack.common import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setuptools.setup(
name="python-heatclient",
version=setup.get_post_version('heatclient'),
author='Heat API Developers',
author_email='[email protected]',
description="Client library for Heat orchestration API",
long_description=read('README.md'),
license='Apache',
url='https://github.com/heat-api/python-heatclient',
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
install_requires=setup.parse_requirements(),
test_suite="nose.collector",
cmdclass=setup.get_cmdclass(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
entry_points={
'console_scripts': ['heat = heatclient.shell:main']
},
dependency_links=setup.parse_dependency_links(),
tests_require=setup.parse_requirements(['tools/test-requires']),
setup_requires=['setuptools-git>=0.4'],
)
| apache-2.0 | 5,432,592,217,602,906,000 | 35.862745 | 74 | 0.697872 | false |
mchrzanowski/ProjectEuler | src/python/Problem105.py | 1 | 3645 | '''
Created on Aug 18, 2012
@author: mchrzanowski
'''
import itertools
import os.path
import time
def do_two_subsets_equal_each_other(numbers, value_to_equal):
'''
exact subset problem.
return true if we have a subset that equals value_to_equal.
http://www.cs.dartmouth.edu/~ac/Teach/CS105-Winter05/Notes/nanda-scribe-3.pdf
'''
def merge_lists(first, second):
return first + second
def add_to_every_element(value_to_add, elements):
return map(lambda x: x + value_to_add, elements)
L = [[0]]
numbers = list(numbers) # we need to preserve position.
numbers.insert(0, 0) # we need a header for the below algo.
for i in xrange(1, len(numbers)):
L.append(list())
raw_list = merge_lists(L[i - 1],
add_to_every_element(numbers[i], L[i - 1]))
for element in raw_list:
if value_to_equal == element:
return True
elif element < value_to_equal:
L[i].append(element)
return False
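# A minimal usage sketch (added; the example values are assumed, not from the
# original file): do_two_subsets_equal_each_other({1, 2, 3}, 3) returns True,
# since the subset {1, 2} sums to 3, while
# do_two_subsets_equal_each_other({2, 4, 6}, 5) returns False because no
# subset of even numbers can reach an odd total.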
def does_larger_subset_sum_to_a_larger_number(B, C):
'''
    verify that for two subsets B & C, if len(B) > len(C),
    then sum(B) > sum(C); return False if this property is violated
'''
if len(B) > len(C) and sum(B) <= sum(C):
return False
if len(C) > len(B) and sum(C) <= sum(B):
return False
return True
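# Quick sanity example (added; assumed values): for B = (3, 5) and C = (9,),
# len(B) > len(C) but sum(B) = 8 <= 9 = sum(C), so the check returns False.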
def all_subsets(numbers):
'''
return a set of sets, each containing
two subsets of this number collection
'''
subsets = set()
for first_length in xrange(1, len(numbers)):
for first_combo in itertools.combinations(numbers, first_length):
disjoint_numbers = [number for number in numbers if number not in first_combo]
for second_length in xrange(1, len(disjoint_numbers) + 1):
for second_combo in itertools.combinations(disjoint_numbers, second_length):
subsets.add(frozenset((first_combo, second_combo,)))
return subsets
def all_partitions(numbers):
'''
return a list of tuples, each containing all the various
partitions of this number collection
'''
partitions = list()
for length in xrange(1, len(numbers)):
for combination in itertools.combinations(numbers, length):
numbers_sans_combination = [element for element in numbers if element not in combination]
partitions.append((numbers_sans_combination, combination))
return partitions
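# For intuition (added note): all_partitions([1, 2, 3]) yields the pairs
# ([2, 3], (1,)), ([1, 3], (2,)), ([1, 2], (3,)), ([3], (1, 2)), ([2], (1, 3))
# and ([1], (2, 3)) -- every split into a combination and its complement.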
def is_group_acceptable(numbers):
'''
verify the properties of equality and
of larger sets summing to larger numbers
for this given group of numbers
'''
for partition in all_partitions(numbers):
first, second = partition
if do_two_subsets_equal_each_other(first, sum(second)):
return False
for subset in all_subsets(numbers):
first, second = subset
if not does_larger_subset_sum_to_a_larger_number(first, second):
return False
return True
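# Worked example (added; these sets are illustrative, not taken from
# Problem105Sets.txt): is_group_acceptable({2, 3, 4}) returns True, while
# is_group_acceptable({1, 2, 3}) returns False because the disjoint subsets
# {1, 2} and {3} share the sum 3.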
def main():
with open(os.path.join(os.curdir,
'./requiredFiles/Problem105Sets.txt')) as f:
special_sets = list()
for row in f:
numbers = set()
for number in row.split(","):
numbers.add(int(number))
if is_group_acceptable(numbers):
special_sets.append(numbers)
total = sum(sum(special_set) for special_set in special_sets)
print "Total: %d" % total
if __name__ == '__main__':
begin = time.time()
main()
end = time.time()
print "Runtime: %f seconds." % (end - begin)
| mit | 7,038,937,051,428,532,000 | 26.201493 | 101 | 0.601646 | false |
erix5son/Tennis-Modelling | ranking_systems/tests/test_glicko2_ranking.py | 1 | 1307 | # -*- coding: utf-8 -*-
__author__ = 'Heungsub Lee'
from glicko2 import Glicko2, WIN, DRAW, LOSS
class almost(object):
def __init__(self, val, precision=3):
self.val = val
self.precision = precision
def almost_equals(self, val1, val2):
if round(val1, self.precision) == round(val2, self.precision):
return True
fmt = '%.{0}f'.format(self.precision)
mantissa = lambda f: int((fmt % f).replace('.', ''))
return abs(mantissa(val1) - mantissa(val2)) <= 1
def __eq__(self, other):
try:
if not self.almost_equals(self.val.volatility, other.volatility):
return False
except AttributeError:
pass
return (self.almost_equals(self.val.mu, other.mu) and
self.almost_equals(self.val.sigma, other.sigma))
def __repr__(self):
return repr(self.val)
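# How the tolerance works (added note, assumed values): with precision=3,
# almost_equals(1464.051, 1464.052) rounds to two different values, but the
# formatted mantissas 1464051 and 1464052 differ by only 1, so it still
# returns True -- the comparison tolerates a one-unit slip in the last digit.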
def test_glickman_example():
env = Glicko2(tau=0.5)
r1 = env.create_rating(1500, 200, 0.06)
r2 = env.create_rating(1400, 30)
r3 = env.create_rating(1550, 100)
r4 = env.create_rating(1700, 300)
rated = env.rate(r1, [(WIN, r2), (LOSS, r3), (LOSS, r4)])
# env.create_rating2(1464.06, 151.52, 0.05999)
assert almost(rated) == env.create_rating(1464.051, 151.515, 0.05999)
| mit | -5,102,957,927,354,244,000 | 30.119048 | 77 | 0.58684 | false |
NUKnightLab/TimelineJS3 | website/app.py | 1 | 2893 | '''
Main entrypoint file. To run:
$ python serve.py
'''
from flask import Flask
from flask import request
from flask import render_template
from flask import json
from flask import send_from_directory
import importlib
import traceback
import sys
import os
# Add current directory to sys.path
site_dir = os.path.dirname(os.path.abspath(__file__))
examples_json = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'examples.json')
faq_json = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'faq.json')
if site_dir not in sys.path:
sys.path.append(site_dir)
# Set default FLASK_SETTINGS_MODULE for debug mode
if not os.environ.get('FLASK_SETTINGS_MODULE', ''):
os.environ['FLASK_SETTINGS_MODULE'] = 'core.settings.loc'
# Import settings module for the inject_static_url context processor.
settings_module = os.environ.get('FLASK_SETTINGS_MODULE')
try:
importlib.import_module(settings_module)
except ImportError, e:
raise ImportError(
"Could not import settings '%s' (Is it on sys.path?): %s" \
% (settings_module, e))
settings = sys.modules[settings_module]
app = Flask(__name__)
dist_dir = os.path.join(settings.PROJECT_ROOT, 'dist')
@app.context_processor
def inject_static_url():
"""
Inject the variables 'static_url' and 'STATIC_URL' into the templates to
avoid hard-coded paths to static files. Grab it from the environment
variable STATIC_URL, or use the default. Never has a trailing slash.
"""
static_url = settings.STATIC_URL or app.static_url_path
if static_url.endswith('/'):
static_url = static_url.rstrip('/')
return dict(static_url=static_url, STATIC_URL=static_url)
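# Illustrative template usage (added note; the asset path is assumed): with this
# context processor, a Jinja2 template can write
#   <link rel="stylesheet" href="{{ STATIC_URL }}/css/site.css">
# instead of hard-coding the location of static files.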
@app.context_processor
def inject_index_data():
return dict(examples=json.load(open(examples_json)),faqs=json.load(open(faq_json)))
@app.route('/dist/<path:path>')
def catch_build(path):
"""
Serve /dist/... urls from the build directory
"""
return send_from_directory(dist_dir, path)
@app.route('/')
@app.route('/<path:path>')
def catch_all(path='index.html', context=None):
"""Catch-all function which serves every URL."""
context = context or {}
if not os.path.splitext(path)[1]:
path = os.path.join(path, 'index.html')
return render_template(path, **context)
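# Example routing (added note): a request for /docs has no file extension, so
# it renders templates/docs/index.html, while a request for /about.html is
# rendered as-is from the template directory.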
if __name__ == "__main__":
import getopt
ssl_context = None
port = 5000
try:
opts, args = getopt.getopt(sys.argv[1:], "sp:", ["port="])
for opt, arg in opts:
if opt == '-s':
ssl_context = 'adhoc'
elif opt in ('-p', '--port'):
port = int(arg)
else:
print 'Usage: app.py [-s]'
sys.exit(1)
except getopt.GetoptError:
print 'Usage: app.py [-s] [-p port]'
sys.exit(1)
app.run(host='0.0.0.0', port=port, debug=True, ssl_context=ssl_context)
| mpl-2.0 | 8,731,970,829,104,129,000 | 27.643564 | 91 | 0.645351 | false |
jasonmccampbell/numpy-refactor-sprint | numpy/ma/tests/test_core.py | 1 | 130988 | # pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
__author__ = "Pierre GF Gerard-Marchant"
import types
import warnings
import numpy as np
import numpy.core.fromnumeric as fromnumeric
from numpy import ndarray
from numpy.ma.testutils import *
import numpy.ma.core
from numpy.ma.core import *
from numpy.compat import asbytes, asbytes_nested
pi = np.pi
import sys
if sys.version_info[0] >= 3:
from functools import reduce
#..............................................................................
class TestMaskedArray(TestCase):
"Base test class for MaskedArrays."
def setUp (self):
"Base data definition."
x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
"Tests some basic array attributes."
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
"Checks masking a scalar"
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
self.assertTrue(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
"Test of basic array creation and properties in 1 dimension."
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertTrue((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size , reduce(lambda x, y:x * y, s))
assert_equal(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
"Test of basic array creation and properties in 2 dimensions."
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
#
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size , reduce(lambda x, y:x * y, s))
assert_equal(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
"Tests concatenations."
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
"Tests concatenations."
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
#
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
"Tests the concatenation on flexible arrays."
data = masked_array(zip(np.random.rand(10),
np.arange(10)),
dtype=[('a', float), ('b', int)])
#
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
"Check the use of ndmin"
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
"Make sure we're not losing the original mask w/ ndmin"
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
"Tests how masks are initialized at the creation of Maskedarrays."
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
def test_creation_with_list_of_maskedarrays(self):
"Tests creaating a masked array from alist of masked arrays."
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
#
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
self.assertTrue(data.mask is nomask)
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
xmm = asarray(xm)
assert_equal(xmm._data, xm._data)
assert_equal(xmm._mask, xm._mask)
assert_equal(xmm.fill_value, xm.fill_value)
assert_equal(xmm._hardmask, xm._hardmask)
def test_fix_invalid(self):
"Checks fix_invalid."
err_status_ini = np.geterr()
try:
np.seterr(invalid='ignore')
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
finally:
np.seterr(**err_status_ini)
def test_maskedelement(self):
"Test of masked element"
x = arange(6)
x[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
# don't know why these should raise an exception...
#self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
#self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
#self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
#self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_set_element_as_object(self):
"""Tests setting elements with object"""
a = empty(1, dtype=object)
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
self.assertTrue(a[0] is x)
#
import datetime
dt = datetime.datetime.now()
a[0] = dt
self.assertTrue(a[0] is dt)
def test_indexing(self):
"Tests conversions and indexing"
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
junk, garbage = str(x2), repr(x2)
assert_equal(np.sort(x1), sort(x2, endwith=False))
# tests of indexing
assert type(x2[1]) is type(x1[1])
assert x1[1] == x2[1]
assert x2[0] is masked
assert_equal(x1[2], x2[2])
assert_equal(x1[2:5], x2[2:5])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
assert_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
assert_equal(x1, x2)
x2[1] = masked
assert_equal(x1, x2)
x2[1:3] = masked
assert_equal(x1, x2)
x2[:] = x1
x2[1] = masked
assert allequal(getmask(x2), array([0, 1, 0, 0]))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert allequal(getmask(x3), array([0, 1, 1, 0]))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert allequal(getmask(x4), array([0, 1, 1, 0]))
assert allequal(x4, array([1, 2, 3, 4]))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)
assert_equal(3.0, x2.fill_value)
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert x1[1:1].shape == (0,)
def test_copy(self):
"Tests of some subtle points of copying and sizing."
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
warnings.simplefilter('ignore', DeprecationWarning)
x1 = np.arange(5)
y1 = array(x1, mask=m)
#self.assertTrue( y1._data is x1)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
self.assertTrue(allequal(x1, y1.raw_data()))
#self.assertTrue( y1.mask is m)
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
warnings.simplefilter('default', DeprecationWarning)
y1a = array(y1)
#self.assertTrue( y1a.raw_data() is y1.raw_data())
self.assertTrue(y1a._data.__array_interface__ == y1._data.__array_interface__)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m)
#self.assertTrue( y2.raw_data() is x1)
self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
#self.assertTrue( y2.mask is m)
self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
#self.assertTrue( y2.mask is not m)
self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_equal(concatenate([x4, x4]), y4)
assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = repeat(x4, 2, axis=0)
assert_equal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert_equal(y5, y7)
y8 = x4.repeat(2, 0)
assert_equal(y5, y8)
y9 = x4.copy()
assert_equal(y9._data, x4._data)
assert_equal(y9._mask, x4._mask)
#
x = masked_array([1, 2, 3], mask=[0, 1, 0])
# Copy is False by default
y = masked_array(x)
assert_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
y = masked_array(x, copy=True)
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
assert_not_equal(id(a._mask), id(copied._mask))
#
copied[1] = 1
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
#
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
copied.mask[1] = False
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
def test_pickling(self):
"Tests pickling"
import cPickle
a = arange(10)
a[::3] = masked
a.fill_value = 999
a_pickled = cPickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
assert_equal(a_pickled.fill_value, 999)
def test_pickling_subbaseclass(self):
"Test pickling w/ a subclass of ndarray"
import cPickle
a = array(np.matrix(range(10)), mask=[1, 0, 1, 0, 0] * 2)
a_pickled = cPickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
self.assertTrue(isinstance(a_pickled._data, np.matrix))
def test_pickling_wstructured(self):
"Tests pickling w/ structured array"
import cPickle
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
a_pickled = cPickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
"Tests pickling w/ F_CONTIGUOUS arrays"
import cPickle
a = arange(10)
a.shape = (-1, 2)
b = a.T
test = cPickle.loads(cPickle.dumps(b))
assert_equal(test, b)
# def test_pickling_oddity(self):
# "Test some pickling oddity"
# import cPickle
# a = array([{'a':1}, {'b':2}, 3], dtype=object)
# test = cPickle.loads(cPickle.dumps(a))
# assert_equal(test, a)
def test_single_element_subscript(self):
"Tests single element subscripts of Maskedarrays."
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_topython(self):
"Tests some communication issues with Python."
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
self.assertRaises(TypeError, float, array([1, 1]))
#
warnings.simplefilter('ignore', UserWarning)
assert np.isnan(float(array([1], mask=[1])))
warnings.simplefilter('default', UserWarning)
#
a = array([1, 2, 3], mask=[1, 0, 0])
self.assertRaises(TypeError, lambda:float(a))
assert_equal(float(a[-1]), 3.)
self.assertTrue(np.isnan(float(a[0])))
self.assertRaises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
self.assertRaises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
"Test of other odd features"
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert x[1, 0] == 12
z = x + 10j * x
assert_equal(z.real, x)
assert_equal(z.imag, 10 * x)
assert_equal((z * conjugate(z)).real, 101 * x * x)
z.imag[...] = 0.0
#
x = arange(10)
x[3] = masked
assert str(x[3]) == str(masked)
c = x >= 8
assert count(where(c, masked, masked)) == 0
assert shape(where(c, masked, masked)) == c.shape
#
z = masked_where(c, x)
assert z.dtype is x.dtype
assert z[3] is masked
assert z[4] is not masked
assert z[7] is not masked
assert z[8] is masked
assert z[9] is masked
assert_equal(x, z)
def test_oddfeatures_2(self):
"Tests some more features."
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert z[0] is masked
assert z[1] is not masked
assert z[2] is masked
def test_oddfeatures_3(self):
"""Tests some generic features."""
atest = array([10], mask=True)
btest = array([20])
idx = atest.mask
atest[idx] = btest[idx]
assert_equal(atest, [20])
def test_filled_w_flexible_dtype(self):
"Test filled w/ flexible dtype"
flexi = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
flexi[0] = masked
assert_equal(flexi.filled(),
np.array([(default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),)], dtype=flexi.dtype))
flexi[0] = masked
assert_equal(flexi.filled(1),
np.array([(1, '1', 1.)], dtype=flexi.dtype))
def test_filled_w_mvoid(self):
"Test filled w/ mvoid"
ndtype = [('a', int), ('b', float)]
a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
# Filled using default
test = a.filled()
assert_equal(tuple(test), (1, default_fill_value(1.)))
# Explicit fill_value
test = a.filled((-1, -1))
assert_equal(tuple(test), (1, -1))
# Using predefined filling values
a.fill_value = (-999, -999)
assert_equal(tuple(a.filled()), (1, -999))
def test_filled_w_nested_dtype(self):
"Test filled w/ nested dtype"
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([(1, (1, 1)), (2, (2, 2))],
mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
test = a.filled(0)
control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
assert_equal(test, control)
#
test = a['B'].filled(0)
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
def test_optinfo_propagation(self):
"Checks that _optinfo dictionary isn't back-propagated"
x = array([1, 2, 3, ], dtype=float)
x._optinfo['info'] = '???'
y = x.copy()
assert_equal(y._optinfo['info'], '???')
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
def test_fancy_printoptions(self):
"Test printing a masked array w/ fancy dtype."
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = array([(1, (2, 3.0)), (4, (5, 6.0))],
mask=[(1, (0, 1)), (0, (1, 0))],
dtype=fancydtype)
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
def test_flatten_structured_array(self):
"Test flatten_structured_array on arrays"
# On ndarray
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
mask=[[0, 1], [1, 0]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# On masked array with nested structure
ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
a = array([(1, (1, 1.1)), (2, (2, 2.2))],
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# Keeping the initial shape
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
def test_void0d(self):
"Test creating a mvoid object"
ndtype = [('a', int), ('b', int)]
a = np.array([(1, 2,)], dtype=ndtype)[0]
f = mvoid(a)
assert(isinstance(f, mvoid))
#
a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
assert(isinstance(a, mvoid))
#
a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
f = mvoid(a._data[0], a._mask[0])
assert(isinstance(f, mvoid))
def test_mvoid_getitem(self):
"Test mvoid.__getitem__"
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype)
# w/o mask
f = a[0]
self.assertTrue(isinstance(f, np.void))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
self.assertTrue(isinstance(f, mvoid))
self.assertTrue(f[0] is masked)
self.assertTrue(f['a'] is masked)
assert_equal(f[1], 4)
def test_mvoid_iter(self):
"Test iteration on __getitem__"
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype)
# w/o mask
assert_equal(list(a[0]), [1, 2])
# w/ mask
assert_equal(list(a[1]), [masked, 4])
def test_mvoid_print(self):
"Test printing a mvoid"
mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
assert_equal(str(mx[0]), "(1, 1)")
mx['b'][0] = masked
ini_display = masked_print_option._display
masked_print_option.set_display("-X-")
try:
assert_equal(str(mx[0]), "(1, -X-)")
assert_equal(repr(mx[0]), "(1, -X-)")
finally:
masked_print_option.set_display(ini_display)
#------------------------------------------------------------------------------
class TestMaskedArrayArithmetic(TestCase):
"Base test class for MaskedArrays."
def setUp (self):
"Base data definition."
x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic (self):
"Test of basic arithmetic."
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_equal(a2d * a2d, a2d * a2dm)
assert_equal(a2d + a2d, a2d + a2dm)
assert_equal(a2d - a2d, a2d - a2dm)
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_equal(-x, -xm)
assert_equal(x + y, xm + ym)
assert_equal(x - y, xm - ym)
assert_equal(x * y, xm * ym)
assert_equal(x / y, xm / ym)
assert_equal(a10 + y, a10 + ym)
assert_equal(a10 - y, a10 - ym)
assert_equal(a10 * y, a10 * ym)
assert_equal(a10 / y, a10 / ym)
assert_equal(x + a10, xm + a10)
assert_equal(x - a10, xm - a10)
assert_equal(x * a10, xm * a10)
assert_equal(x / a10, xm / a10)
assert_equal(x ** 2, xm ** 2)
assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
assert_equal(x ** y, xm ** ym)
assert_equal(np.add(x, y), add(xm, ym))
assert_equal(np.subtract(x, y), subtract(xm, ym))
assert_equal(np.multiply(x, y), multiply(xm, ym))
assert_equal(np.divide(x, y), divide(xm, ym))
def test_divide_on_different_shapes(self):
x = arange(6, dtype=float)
x.shape = (2, 3)
y = arange(3, dtype=float)
#
z = x / y
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
#
z = x / y[None, :]
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
#
y = arange(2, dtype=float)
z = x / y[:, None]
assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
"Tests mixed arithmetics."
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
a = array([tiny, 1. / tiny, 0.])
assert_equal(getmaskarray(a / 2), [0, 0, 0])
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
"Tests some scalar arithmetics on MaskedArrays."
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
self.assertTrue((1 / array(0)).mask)
self.assertTrue((1 + xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue(maximum(xm, xm).mask)
self.assertTrue(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
"Tests (in)equality on masked snigleton"
a = array([1, 2, 3], mask=[1, 1, 0])
assert((a[0] == 0) is masked)
assert((a[0] != 0) is masked)
assert_equal((a[-1] == 0), False)
assert_equal((a[-1] != 0), True)
def test_arithmetic_with_masked_singleton(self):
"Checks that there's no collapsing to masked"
x = masked_array([1, 2])
y = x * masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
y = x[0] * masked
assert y is masked
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
"Check that we're not losing the shape of a singleton"
x = masked_array([1, ])
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y.mask, [True, ])
def test_scalar_arithmetic(self):
x = array(0, mask=0)
assert_equal(x.filled().ctypes.data, x.ctypes.data)
# Make sure we don't lose the shape in some circumstances
xm = array((0, 0)) / 0.
assert_equal(xm.shape, (2,))
assert_equal(xm.mask, [1, 1])
def test_basic_ufuncs (self):
"Test various functions such as sin, cos."
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_count_func (self):
"Tests count"
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
if sys.version_info[0] >= 3:
self.assertTrue(isinstance(count(ott), np.integer))
else:
self.assertTrue(isinstance(count(ott), int))
assert_equal(3, count(ott))
assert_equal(1, count(1))
        assert_equal(0, count(array(1, mask=[1])))
ott = ott.reshape((2, 2))
assert isinstance(count(ott, 0), ndarray)
if sys.version_info[0] >= 3:
assert isinstance(count(ott), np.integer)
else:
assert isinstance(count(ott), types.IntType)
assert_equal(3, count(ott))
assert getmask(count(ott, 0)) is nomask
assert_equal([1, 2], count(ott, 0))
def test_minmax_func (self):
"Tests minimum and maximum."
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xr = np.ravel(x) #max doesn't work if shaped
xmr = ravel(xm)
assert_equal(max(xr), maximum(xmr)) #true because of careful selection of data
assert_equal(min(xr), minimum(xmr)) #true because of careful selection of data
#
assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_equal(minimum(x, y), where(less(x, y), x, y))
assert_equal(maximum(x, y), where(greater(x, y), x, y))
assert minimum(x) == 0
assert maximum(x) == 4
#
x = arange(4).reshape(2, 2)
x[-1, -1] = masked
assert_equal(maximum(x), 2)
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
#
aminimum = minimum.outer(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
#
amaximum = maximum(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
#
amaximum = maximum.outer(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
"Test np.min/maximum.reduce on array w/ full False mask"
a = array([1, 2, 3], mask=[False, False, False])
b = np.maximum.reduce(a)
assert_equal(b, 3)
def test_minmax_funcs_with_output(self):
"Tests the min/max functions with explicit outputs"
mask = np.random.rand(12).round()
xm = array(np.random.uniform(0, 10, 12), mask=mask)
xm.shape = (3, 4)
for funcname in ('min', 'max'):
# Initialize
npfunc = getattr(np, funcname)
mafunc = getattr(numpy.ma.core, funcname)
# Use the np version
nout = np.empty((4,), dtype=int)
try:
result = npfunc(xm, axis=0, out=nout)
except MaskError:
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
def test_minmax_methods(self):
"Additional tests on max/min"
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
self.assertTrue(xm[0].max() is masked)
self.assertTrue(xm[0].max(0) is masked)
self.assertTrue(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
self.assertTrue(xm[0].min() is masked)
self.assertTrue(xm[0].min(0) is masked)
self.assertTrue(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
self.assertTrue(xm[0].ptp() is masked)
self.assertTrue(xm[0].ptp(0) is masked)
self.assertTrue(xm[0].ptp(-1) is masked)
#
x = array([1, 2, 3], mask=True)
self.assertTrue(x.min() is masked)
self.assertTrue(x.max() is masked)
self.assertTrue(x.ptp() is masked)
def test_addsumprod (self):
"Tests add, sum, product."
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.add.reduce(x), add.reduce(x))
assert_equal(np.add.accumulate(x), add.accumulate(x))
assert_equal(4, sum(array(4), axis=0))
assert_equal(4, sum(array(4), axis=0))
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
assert_equal(np.product(x, axis=0), product(x, axis=0))
assert_equal(np.product(x, 0), product(x, 0))
assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
assert_equal(np.product(x, 1), product(x, 1))
def test_binops_d2D(self):
"Test binary operations on 2D data"
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
#
test = a * b
control = array([[2., 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
#
test = b * a
control = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
#
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a * b
control = array([[2, 3], [8, 10], [18, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
#
test = b * a
control = array([[2, 3], [8, 10], [18, 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_domained_binops_d2D(self):
"Test domained binary operations on 2D data"
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
#
test = a / b
control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
#
test = b / a
control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
#
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a / b
control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
#
test = b / a
control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_noshrinking(self):
"Check that we don't shrink a mask when not wanted"
# Binary operations
a = masked_array([1, 2, 3], mask=[False, False, False], shrink=False)
b = a + 1
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a += 1
assert_equal(a.mask, [0, 0, 0])
# Domained binary operation
b = a / 1.
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a /= 1.
assert_equal(a.mask, [0, 0, 0])
def test_mod(self):
"Tests mod"
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(mod(x, y), mod(xm, ym))
test = mod(ym, xm)
assert_equal(test, np.mod(ym, xm))
assert_equal(test.mask, mask_or(xm.mask, ym.mask))
test = mod(xm, ym)
assert_equal(test, np.mod(xm, ym))
assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
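        # (added note) The second mask check folds in (ym == 0): mod has a
        # restricted domain, so positions where the divisor is zero get masked
        # on top of the propagated input masks.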
def test_TakeTransposeInnerOuter(self):
"Test of take, transpose, inner, outer products"
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
assert_equal(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y))
assert_equal(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert t[0] == 'abc'
assert t[1] == 2
assert t[2] == 3
def test_imag_real(self):
"Check complex"
xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
assert_equal(xx.imag, [10, 2])
assert_equal(xx.imag.filled(), [1e+20, 2])
assert_equal(xx.imag.dtype, xx._data.imag.dtype)
assert_equal(xx.real, [1, 20])
assert_equal(xx.real.filled(), [1e+20, 20])
assert_equal(xx.real.dtype, xx._data.real.dtype)
def test_methods_with_output(self):
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
#
funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
#
for funcname in funclist:
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty(4, dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
#
output = empty(4, dtype=int)
result = xmmeth(axis=0, out=output)
self.assertTrue(result is output)
self.assertTrue(output[0] is masked)
def test_eq_on_structured(self):
"Test the equality of structured arrays"
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a == a)
assert_equal(test, [True, True])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [False, True])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [True, False])
assert_equal(test.mask, [False, False])
def test_ne_on_structured(self):
"Test the inequality of structured arrays"
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a != a)
assert_equal(test, [False, False])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [True, False])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [False, True])
assert_equal(test.mask, [False, False])
def test_eq_w_None(self):
a = array([1, 2], mask=False)
assert_equal(a == None, False)
assert_equal(a != None, True)
a = masked
assert_equal(a == None, masked)
def test_eq_w_scalar(self):
a = array(1)
assert_equal(a == 1, True)
assert_equal(a == 0, False)
assert_equal(a != 1, False)
assert_equal(a != 0, True)
def test_numpyarithmetics(self):
"Check that the mask is not back-propagated when using numpy functions"
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
mask=[1, 1, 0, 0, 1])
#
test = log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
#
test = np.log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
#------------------------------------------------------------------------------
class TestMaskedArrayAttributes(TestCase):
def test_keepmask(self):
"Tests the keep mask flag"
x = masked_array([1, 2, 3], mask=[1, 0, 0])
mx = masked_array(x)
assert_equal(mx.mask, x.mask)
mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
assert_equal(mx.mask, [0, 1, 0])
mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
assert_equal(mx.mask, [1, 1, 0])
# We default to true
mx = masked_array(x, mask=[0, 1, 0])
assert_equal(mx.mask, [1, 1, 0])
def test_hardmask(self):
"Test hard_mask"
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
# We need to copy, to avoid updating d in xh !
xs = array(d, mask=m, hard_mask=False, copy=True)
xh[[1, 4]] = [10, 40]
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, [0, 0, 0, 1, 0])
self.assertTrue(xh._hardmask)
self.assertTrue(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, [1, 0, 0, 0, 0])
xh[:] = 1
xs[:] = 1
assert_equal(xh._data, [0, 1, 1, 3, 4])
assert_equal(xs._data, [1, 1, 1, 1, 1])
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, nomask)
# Switch to soft mask
xh.soften_mask()
xh[:] = arange(5)
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh.mask, nomask)
# Switch back to hard mask
xh.harden_mask()
xh[xh < 3] = masked
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh[filled(xh > 1, False)] = 5
assert_equal(xh._data, [0, 1, 2, 5, 5])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
#
xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
xh[0] = 0
assert_equal(xh._data, [[1, 0], [3, 4]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[-1, -1] = 5
assert_equal(xh._data, [[1, 0], [3, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[filled(xh < 5, False)] = 2
assert_equal(xh._data, [[1, 2], [2, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
def test_hardmask_again(self):
"Another test of hardmask"
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
def test_hardmask_oncemore_yay(self):
"OK, yet another test of hardmask"
"Make sure that harden_mask/soften_mask//unshare_mask retursn self"
a = array([1, 2, 3], mask=[1, 0, 0])
b = a.harden_mask()
assert_equal(a, b)
b[0] = 0
assert_equal(a, b)
assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
a = b.soften_mask()
a[0] = 0
assert_equal(a, b)
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
def test_smallmask(self):
"Checks the behaviour of _smallmask"
a = arange(10)
a[1] = masked
a[1] = 1
assert_equal(a._mask, nomask)
a = arange(10)
a._smallmask = False
a[1] = masked
a[1] = 1
assert_equal(a._mask, zeros(10))
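        # (added note) With _smallmask at its default of True, a mask that
        # becomes all-False collapses back to nomask; setting _smallmask to
        # False keeps the full boolean mask array, as asserted above.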
def test_shrink_mask(self):
"Tests .shrink_mask()"
a = array([1, 2, 3], mask=[0, 0, 0])
b = a.shrink_mask()
assert_equal(a, b)
assert_equal(a.mask, nomask)
def test_flat(self):
"Test flat on masked_matrices"
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
assert_equal(test, control)
#
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
testflat = test.flat
testflat[:] = testflat[[2, 1, 0]]
assert_equal(test, control)
#------------------------------------------------------------------------------
class TestFillingValues(TestCase):
#
def test_check_on_scalar(self):
"Test _check_fill_value"
_check_fill_value = np.ma.core._check_fill_value
#
fval = _check_fill_value(0, int)
assert_equal(fval, 0)
fval = _check_fill_value(None, int)
assert_equal(fval, default_fill_value(0))
#
fval = _check_fill_value(0, "|S3")
assert_equal(fval, asbytes("0"))
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value("|S3"))
#
fval = _check_fill_value(1e+20, int)
assert_equal(fval, default_fill_value(0))
def test_check_on_fields(self):
"Tests _check_fill_value with records"
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using a flexible type w/ a different type shouldn't matter
fill_val = np.array((-999, -12345678.9, "???"),
dtype=[("A", int), ("B", float), ("C", "|S3")])
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using an object-array shouldn't matter either
        fill_val = np.array((-999, -12345678.9, "???"), dtype=object)
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#
        fill_val = np.array((-999, -12345678.9, "???"))
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
"Tests the behavior of fill_value during conversion"
# We had a tailored comment to make sure special attributes are properly
# dealt with
a = array(asbytes_nested(['3', '4', '5']))
a._optinfo.update({'comment':"updated!"})
#
b = array(a, dtype=int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
#
b = array(a, dtype=float)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0.))
#
b = a.astype(int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
assert_equal(b._optinfo['comment'], "updated!")
#
b = a.astype([('a', '|S3')])
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
def test_fillvalue(self):
"Yet more fun with the fill_value"
data = masked_array([1, 2, 3], fill_value= -999)
series = data[[0, 2, 1]]
assert_equal(series._fill_value, data._fill_value)
#
mtype = [('f', float), ('s', '|S3')]
x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
x.fill_value = 999
assert_equal(x.fill_value.item(), [999., asbytes('999')])
assert_equal(x['f'].fill_value, 999)
assert_equal(x['s'].fill_value, asbytes('999'))
#
x.fill_value = (9, '???')
assert_equal(x.fill_value.item(), (9, asbytes('???')))
assert_equal(x['f'].fill_value, 9)
assert_equal(x['s'].fill_value, asbytes('???'))
#
x = array([1, 2, 3.1])
x.fill_value = 999
assert_equal(np.asarray(x.fill_value).dtype, float)
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
def test_fillvalue_exotic_dtype(self):
"Tests yet more exotic flexible dtypes"
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('i', int), ('s', '|S8'), ('f', float)]
control = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),),
dtype=ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
# The shape shouldn't matter
ndtype = [('f0', float, (2, 2))]
control = np.array((default_fill_value(0.),),
dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
#
ndtype = np.dtype("int, (2,3)float, float")
control = np.array((default_fill_value(0),
default_fill_value(0.),
default_fill_value(0.),),
dtype="int, float, float").astype(ndtype)
test = _check_fill_value(None, ndtype)
assert_equal(test, control)
control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
def test_extremum_fill_value(self):
"Tests extremum fill values for flexible type."
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
#
test = minimum_fill_value(a)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
#
test = maximum_fill_value(a)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
def test_fillvalue_individual_fields(self):
"Test setting fill_value on individual fields"
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
a = array(zip([1, 2, 3], [4, 5, 6]),
fill_value=(-999, -999), dtype=ndtype)
f = a._fill_value
aa = a['a']
aa.set_fill_value(10)
assert_equal(aa._fill_value, np.array(10))
assert_equal(tuple(a.fill_value), (10, -999))
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
t = array(zip([1, 2, 3], [4, 5, 6]), dtype=[('a', int), ('b', int)])
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
def test_fillvalue_implicit_structured_array(self):
"Check that fill_value is always defined for structured arrays"
ndtype = ('b', float)
adtype = ('a', float)
a = array([(1.,), (2.,)], mask=[(False,), (False,)],
fill_value=(np.nan,), dtype=np.dtype([adtype]))
b = empty(a.shape, dtype=[adtype, ndtype])
b['a'] = a['a']
b['a'].set_fill_value(a['a'].fill_value)
f = b._fill_value[()]
assert(np.isnan(f[0]))
assert_equal(f[-1], default_fill_value(1.))
def test_fillvalue_as_arguments(self):
"Test adding a fill_value parameter to empty/ones/zeros"
a = empty(3, fill_value=999.)
assert_equal(a.fill_value, 999.)
#
a = ones(3, fill_value=999., dtype=float)
assert_equal(a.fill_value, 999.)
#
a = zeros(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
#
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
#------------------------------------------------------------------------------
class TestUfuncs(TestCase):
"Test class for the application of ufuncs on MaskedArrays."
def setUp(self):
"Base data definition."
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
"Tests new ufuncs on MaskedArrays."
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
# 'nonzero', 'around',
'floor', 'ceil',
# 'sometrue', 'alltrue',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_reduce(self):
"Tests reduce on MaskedArrays."
a = self.d[0]
self.assertTrue(not alltrue(a, axis=0))
self.assertTrue(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
def test_minmax(self):
"Tests extrema on MaskedArrays."
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
self.assertTrue(amask.max(1)[0].mask)
self.assertTrue(amask.min(1)[0].mask)
def test_ndarray_mask(self):
"Check that the mask of the result is a ndarray (not a MaskedArray...)"
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
test = np.sqrt(a)
control = masked_array([-1, 0, 1, np.sqrt(2), -1],
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
self.assertTrue(not isinstance(test.mask, MaskedArray))
#------------------------------------------------------------------------------
class TestMaskedArrayInPlaceArithmetics(TestCase):
"Test MaskedArray Arithmetics"
def setUp(self):
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
def test_inplace_addition_scalar(self):
"""Test of inplace additions"""
(x, y, xm) = self.intdata
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
#
warnings.simplefilter('ignore', DeprecationWarning)
(x, _, xm) = self.floatdata
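        # the in-place add must reuse the same data buffer (same ctypes address)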
id1 = x.raw_data().ctypes._data
x += 1.
assert (id1 == x.raw_data().ctypes._data)
assert_equal(x, y + 1.)
warnings.simplefilter('default', DeprecationWarning)
def test_inplace_addition_array(self):
"""Test of inplace additions"""
(x, y, xm) = self.intdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
"""Test of inplace subtractions"""
(x, y, xm) = self.intdata
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
"""Test of inplace subtractions"""
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
"""Test of inplace multiplication"""
(x, y, xm) = self.floatdata
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
"""Test of inplace multiplication"""
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
"""Test of inplace division"""
(x, y, xm) = self.intdata
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x /= 2
assert_equal(x, y)
xm /= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
"""Test of inplace division"""
(x, y, xm) = self.floatdata
x /= 2.0
assert_equal(x, y / 2.0)
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
"""Test of inplace division"""
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
#
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
#
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data, [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
#
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data, [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(xm._data, [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
def test_datafriendly_add(self):
"Test keeping data w/ (inplace) addition"
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
"Test keeping data w/ (inplace) subtraction"
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
"Test keeping data w/ (inplace) multiplication"
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
"Test keeping data w/ (inplace) division"
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
"Test keeping data w/ (inplace) power"
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
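        # since b is fully unmasked, the result may carry no mask at all (nomask)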
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
#
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
#
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
#
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
#------------------------------------------------------------------------------
class TestMaskedArrayMethods(TestCase):
"Test class for miscellaneous MaskedArrays methods."
def setUp(self):
"Base data definition."
x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,
8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_generic_methods(self):
"Tests some MaskedArray methods."
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.any(), a._data.any())
assert_equal(a.all(), a._data.all())
assert_equal(a.argmax(), a._data.argmax())
assert_equal(a.argmin(), a._data.argmin())
assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
assert_equal(a.conj(), a._data.conj())
assert_equal(a.conjugate(), a._data.conjugate())
#
m = array([[1, 2], [3, 4]])
assert_equal(m.diagonal(), m._data.diagonal())
assert_equal(a.sum(), a._data.sum())
assert_equal(a.take([1, 2]), a._data.take([1, 2]))
assert_equal(m.transpose(), m._data.transpose())
def test_allclose(self):
"Tests allclose on arrays"
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
self.assertTrue(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
self.assertTrue(not allclose(a, b))
b[0] = np.inf
self.assertTrue(allclose(a, b))
# Test all close w/ masked
a = masked_array(a)
a[-1] = masked
self.assertTrue(allclose(a, b, masked_equal=True))
self.assertTrue(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
self.assertTrue(allclose(a, 0, masked_equal=True))
def test_allany(self):
"""Checks the any/all methods/functions."""
x = np.array([[ 0.13, 0.26, 0.90],
[ 0.28, 0.33, 0.63],
[ 0.31, 0.87, 0.70]])
m = np.array([[ True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
xbig = np.array([[False, False, True],
[False, False, True],
[False, True, True]], dtype=np.bool_)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
#
assert (mxbig.all() == False)
assert (mxbig.any() == True)
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
#
assert (mxsmall.all() == False)
assert (mxsmall.any() == True)
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
def test_allany_onmatrices(self):
x = np.array([[ 0.13, 0.26, 0.90],
[ 0.28, 0.33, 0.63],
[ 0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[ True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
#
assert (mXbig.all() == False)
assert (mXbig.any() == True)
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([ True, True, True]).T)
#
assert (mXsmall.all() == False)
assert (mXsmall.any() == True)
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_allany_oddities(self):
"Some fun with all and any"
store = empty(1, dtype=bool)
full = array([1, 2, 3], mask=True)
#
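        # reductions over a fully masked array return the masked singleton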
self.assertTrue(full.all() is masked)
full.all(out=store)
self.assertTrue(store)
        assert_equal(store._mask, True)
self.assertTrue(store is not masked)
#
store = empty(1, dtype=bool)
self.assertTrue(full.any() is masked)
full.any(out=store)
self.assertTrue(not store)
        assert_equal(store._mask, True)
self.assertTrue(store is not masked)
def test_argmax_argmin(self):
"Tests argmin & argmax on MaskedArrays."
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
#
assert_equal(mx.argmin(), 35)
assert_equal(mX.argmin(), 35)
assert_equal(m2x.argmin(), 4)
assert_equal(m2X.argmin(), 4)
assert_equal(mx.argmax(), 28)
assert_equal(mX.argmax(), 28)
assert_equal(m2x.argmax(), 31)
assert_equal(m2X.argmax(), 31)
#
assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
#
assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
def test_clip(self):
"Tests clip on MaskedArrays."
x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,
8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
mx = array(x, mask=m)
clipped = mx.clip(2, 8)
assert_equal(clipped.mask, mx.mask)
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
def test_compress(self):
"test compress"
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
condition = (a > 1.5) & (a < 3.5)
assert_equal(a.compress(condition), [2., 3.])
#
a[[2, 3]] = masked
b = a.compress(condition)
assert_equal(b._data, [2., 3.])
assert_equal(b._mask, [0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
#
condition = (a < 4.)
b = a.compress(condition)
assert_equal(b._data, [1., 2., 3.])
assert_equal(b._mask, [0, 0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
#
a = masked_array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0]])
b = a.compress(a.ravel() >= 22)
assert_equal(b._data, [30, 40, 50, 60])
assert_equal(b._mask, [1, 1, 0, 0])
#
x = np.array([3, 1, 2])
b = a.compress(x >= 2, axis=1)
assert_equal(b._data, [[10, 30], [40, 60]])
assert_equal(b._mask, [[0, 1], [1, 0]])
def test_compressed(self):
"Tests compressed"
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
#
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
self.assertTrue(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_empty(self):
"Tests empty/like"
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
#
b = empty_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
#
b = empty(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
def test_put(self):
"Tests put."
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
#self.assertTrue(x.mask is not m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
#
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
i = [0, 2, 4, 6]
x.put(i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
#
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
put(x, i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
def test_put_hardmask(self):
"Tests put on hardmask"
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d + 1, mask=m, hard_mask=True, copy=True)
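        # the hard mask keeps put() from touching the masked entries (indices 3 and 4)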
xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
assert_equal(xh._data, [3, 4, 2, 4, 5])
def test_putmask(self):
x = arange(6) + 1
mx = array(x, mask=[0, 0, 0, 1, 1, 1])
mask = [0, 0, 1, 0, 0, 1]
# w/o mask, w/o masked values
xx = x.copy()
putmask(xx, mask, 99)
assert_equal(xx, [1, 2, 99, 4, 5, 99])
# w/ mask, w/o masked values
mxx = mx.copy()
putmask(mxx, mask, 99)
assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
# w/o mask, w/ masked values
values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
xx = x.copy()
putmask(xx, mask, values)
assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
# w/ mask, w/ masked values
mxx = mx.copy()
putmask(mxx, mask, values)
assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
# w/ mask, w/ masked values + hardmask
mxx = mx.copy()
mxx.harden_mask()
putmask(mxx, mask, values)
assert_equal(mxx, [1, 2, 30, 4, 5, 60])
def test_ravel(self):
"Tests ravel"
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(a._mask.shape, a.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(a._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(a.shape, (1, 5))
assert_equal(a._mask.shape, a.shape)
        # Checks that shrink=False (the old small_mask flag) is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
def test_reshape(self):
"Tests reshape"
x = arange(4)
x[0] = masked
y = x.reshape(2, 2)
assert_equal(y.shape, (2, 2,))
assert_equal(y._mask.shape, (2, 2,))
assert_equal(x.shape, (4,))
assert_equal(x._mask.shape, (4,))
def test_sort(self):
"Test sort"
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
#
sortedx = sort(x)
assert_equal(sortedx._data, [1, 2, 3, 4])
assert_equal(sortedx._mask, [0, 0, 0, 1])
#
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [4, 1, 2, 3])
assert_equal(sortedx._mask, [1, 0, 0, 0])
#
x.sort()
assert_equal(x._data, [1, 2, 3, 4])
assert_equal(x._mask, [0, 0, 0, 1])
#
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
x.sort(endwith=False)
assert_equal(x._data, [4, 1, 2, 3])
assert_equal(x._mask, [1, 0, 0, 0])
#
x = [1, 4, 2, 3]
sortedx = sort(x)
        self.assertTrue(not isinstance(sortedx, MaskedArray))
#
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
def test_sort_2d(self):
"Check sort of 2D array."
# 2D array w/o mask
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
# 2D array w/mask
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
# 3D
a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
[[1, 2, 3], [7, 8, 9], [4, 5, 6]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
[[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
a[a % 4 == 0] = masked
am = a.copy()
an = a.filled(99)
am.sort(0)
an.sort(0)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(1)
an.sort(1)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(2)
an.sort(2)
assert_equal(am, an)
def test_sort_flexible(self):
"Test sort on flexible dtype."
a = array([(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
#
test = sort(a)
b = array([(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
#
test = sort(a, endwith=False)
b = array([(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
def test_argsort(self):
"Test argsort"
a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
assert_equal(np.argsort(a), argsort(a))
def test_squeeze(self):
"Check squeeze"
data = masked_array([[1, 2, 3]])
assert_equal(data.squeeze(), [1, 2, 3])
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
data = masked_array([[1]], mask=True)
self.assertTrue(data.squeeze() is masked)
def test_swapaxes(self):
"Tests swapaxes on MaskedArrays."
x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,
8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mX = array(x, mask=m).reshape(6, 6)
mXX = mX.reshape(3, 2, 2, 3)
#
mXswapped = mX.swapaxes(0, 1)
assert_equal(mXswapped[-1], mX[:, -1])
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_take(self):
"Tests take"
x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
#
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
assert_equal(take(x, [0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
def test_take_masked_indices(self):
"Test take w/ masked indices"
a = np.array((40, 18, 37, 9, 22))
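        # each row holds 3 consecutive indices; the last rows run past len(a)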
indices = np.arange(3)[None, :] + np.arange(5)[:, None]
mindices = array(indices, mask=(indices >= len(a)))
# No mask
test = take(a, mindices, mode='clip')
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[ 9, 22, 22],
[22, 22, 22]])
assert_equal(test, ctrl)
# Masked indices
test = take(a, mindices)
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[ 9, 22, 40],
[22, 40, 40]])
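        # positions taken with out-of-range (masked) indices must come back masked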
ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# Masked input + masked indices
a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
test = take(a, mindices)
ctrl[0, 1] = ctrl[1, 0] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_tolist(self):
"Tests to list"
# ... on 1D
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
self.assertTrue(xlist[1] is None)
self.assertTrue(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
assert_equal(xlist[0], [0, None, 2, 3])
assert_equal(xlist[1], [4, 5, 6, 7])
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
x = array(zip([1, 2, 3],
[1.1, 2.2, 3.3],
['one', 'two', 'thr']),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
[(1, 1.1, asbytes('one')),
(2, 2.2, asbytes('two')),
(None, None, None)])
# ... on structured array w/ masked fields
a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
dtype=[('a', int), ('b', int)])
test = a.tolist()
assert_equal(test, [[1, None], [3, 4]])
# ... on mvoid
a = a[0]
test = a.tolist()
assert_equal(test, [1, None])
def test_tolist_specialcase(self):
"Test mvoid.tolist: make sure we return a standard Python object"
a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
# w/o mask: each entry is a np.void whose elements are standard Python
for entry in a:
for item in entry.tolist():
assert(not isinstance(item, np.generic))
# w/ mask: each entry is a ma.void whose elements should be standard Python
a.mask[0] = (0, 1)
for entry in a:
for item in entry.tolist():
assert(not isinstance(item, np.generic))
def test_toflex(self):
"Test the conversion to records"
data = arange(10)
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
#
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
#
ndtype = [('i', int), ('s', '|S3'), ('f', float)]
data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
'ABCDEFGHIJKLM',
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
#
ndtype = np.dtype("int, (2,3)float, float")
data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
np.random.rand(10),
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal_records(record['_data'], data._data)
assert_equal_records(record['_mask'], data._mask)
def test_fromflex(self):
"Test the reconstruction of a masked_array from a record"
a = array([1, 2, 3])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
#
a = array([1, 2, 3], mask=[0, 0, 1])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
#
a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
dtype=[('A', int), ('B', float)])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.data, a.data)
def test_arraymethod(self):
"Test a _arraymethod w/ n argument"
marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
control = masked_array([[1], [2], [3], [4], [5]],
mask=[0, 0, 1, 0, 0])
assert_equal(marray.T, control)
assert_equal(marray.transpose(), control)
#
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
#------------------------------------------------------------------------------
class TestMaskedArrayMathMethods(TestCase):
def setUp(self):
"Base data definition."
x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,
8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_cumsumprod(self):
"Tests cumsum & cumprod on MaskedArrays."
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXcp = mX.cumsum(0)
assert_equal(mXcp._data, mX.filled(0).cumsum(0))
mXcp = mX.cumsum(1)
assert_equal(mXcp._data, mX.filled(0).cumsum(1))
#
mXcp = mX.cumprod(0)
assert_equal(mXcp._data, mX.filled(1).cumprod(0))
mXcp = mX.cumprod(1)
assert_equal(mXcp._data, mX.filled(1).cumprod(1))
def test_cumsumprod_with_output(self):
"Tests cumsum/cumprod w/ output"
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
#
for funcname in ('cumsum', 'cumprod'):
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
#
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
self.assertTrue(result is output)
def test_ptp(self):
"Tests ptp on MaskedArrays."
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
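        # note: m is rebound here to the column count, shadowing the mask array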
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float)
cols = np.zeros(m, np.float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
def test_sum_object(self):
"Test sum on object dtype"
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
"Test prod on object dtype"
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
"Test mean/anom on object dtype"
a = masked_array([1, 2, 3], dtype=np.object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
def test_trace(self):
"Tests trace on MaskedArrays."
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(), axis=0))
def test_varstd(self):
"Tests var & std on MaskedArrays."
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_almost_equal(mX.std(axis=None, ddof=1),
mX.compressed().std(ddof=1))
assert_almost_equal(mX.var(axis=None, ddof=1),
mX.compressed().var(ddof=1))
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2), mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2), mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std())
def test_varstd_specialcases(self):
"Test a special case for var"
nout = np.empty(1, dtype=float)
mout = empty(1, dtype=float)
#
x = array(arange(10), mask=True)
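        # on a fully masked array, var/std are the masked singleton for any axis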
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method() is masked)
self.assertTrue(method(0) is masked)
self.assertTrue(method(-1) is masked)
# Using a masked array as explicit output
_ = method(out=mout)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
_ = method(out=nout)
self.assertTrue(np.isnan(nout))
#
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method(ddof=1) is masked)
self.assertTrue(method(0, ddof=1) is masked)
self.assertTrue(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
_ = method(out=mout, ddof=1)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
_ = method(out=nout, ddof=1)
self.assertTrue(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
test = a.std(axis=0, ddof=0)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=1)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=2)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [1, 1, 1])
def test_diag(self):
"Test diag"
x = arange(9).reshape((3, 3))
x[1, 1] = masked
out = np.diag(x)
assert_equal(out, [0, 4, 8])
out = diag(x)
assert_equal(out, [0, 4, 8])
assert_equal(out.mask, [0, 1, 0])
out = diag(out)
control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(out, control)
def test_axis_methods_nomask(self):
"Test the combination nomask & methods w/ axis"
a = array([[1, 2, 3], [4, 5, 6]])
#
assert_equal(a.sum(0), [5, 7, 9])
assert_equal(a.sum(-1), [6, 15])
assert_equal(a.sum(1), [6, 15])
#
assert_equal(a.prod(0), [4, 10, 18])
assert_equal(a.prod(-1), [6, 120])
assert_equal(a.prod(1), [6, 120])
#
assert_equal(a.min(0), [1, 2, 3])
assert_equal(a.min(-1), [1, 4])
assert_equal(a.min(1), [1, 4])
#
assert_equal(a.max(0), [4, 5, 6])
assert_equal(a.max(-1), [3, 6])
assert_equal(a.max(1), [3, 6])
#------------------------------------------------------------------------------
class TestMaskedArrayMathMethodsComplex(TestCase):
"Test class for miscellaneous MaskedArrays methods."
def setUp(self):
"Base data definition."
x = np.array([ 8.375j, 7.545j, 8.828j, 8.5j , 1.757j, 5.928,
8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479j,
7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_varstd(self):
"Tests var & std on MaskedArrays."
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2), mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2), mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std())
#------------------------------------------------------------------------------
class TestMaskedArrayFunctions(TestCase):
"Test class for miscellaneous functions."
def setUp(self):
x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.info = (xm, ym)
def test_masked_where_bool(self):
x = [1, 2]
y = masked_where(False, x)
assert_equal(y, [1, 2])
assert_equal(y[1], 2)
def test_masked_equal_wlist(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [0, 0, 1])
mx = masked_not_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [1, 1, 0])
def test_masked_equal_fill_value(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx._mask, [0, 0, 1])
assert_equal(mx.fill_value, 3)
def test_masked_where_condition(self):
"Tests masking functions."
x = array([1., 2., 3., 4., 5.])
x[2] = masked
assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
assert_equal(masked_where(greater_equal(x, 2), x), masked_greater_equal(x, 2))
assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
assert_equal(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
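        # the 99s sit at masked positions, which assert_equal ignores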
assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), [99, 99, 3, 4, 5])
def test_masked_where_oddities(self):
"""Tests some generic features."""
atest = ones((10, 10, 10), dtype=float)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_equal(atest, ctest)
def test_masked_where_shape_constraint(self):
a = arange(10)
try:
test = masked_equal(1, a)
except IndexError:
pass
else:
raise AssertionError("Should have failed...")
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_masked_otherfunctions(self):
assert_equal(masked_inside(range(5), 1, 3), [0, 199, 199, 199, 4])
assert_equal(masked_outside(range(5), 1, 3), [199, 1, 2, 3, 199])
assert_equal(masked_inside(array(range(5), mask=[1, 0, 0, 0, 0]), 1, 3).mask, [1, 1, 1, 1, 0])
assert_equal(masked_outside(array(range(5), mask=[0, 1, 0, 0, 0]), 1, 3).mask, [1, 1, 0, 0, 1])
assert_equal(masked_equal(array(range(5), mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1], mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 1])
def test_round(self):
a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
mask=[0, 1, 0, 0, 0])
assert_equal(a.round(), [1., 2., 3., 5., 6.])
assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
b = empty_like(a)
a.round(out=b)
assert_equal(b, [1., 2., 3., 5., 6.])
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert z[0] is masked
assert z[1] is not masked
assert z[2] is masked
def test_round_with_output(self):
"Testing round with an explicit output"
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
#
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
self.assertTrue(result is output)
def test_identity(self):
a = identity(5)
self.assertTrue(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
self.assertTrue(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
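        # (-1.1) ** 0.5 is invalid, so the first result is masked; the last inherits b's mask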
y = power(x, b)
assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
assert_equal(y._mask, [1, 0, 0, 0, 1])
b.mask = nomask
y = power(x, b)
assert_equal(y._mask, [1, 0, 0, 0, 1])
z = x ** b
assert_equal(z._mask, y._mask)
assert_almost_equal(z, y)
assert_almost_equal(z._data, y._data)
x **= b
assert_equal(x._mask, y._mask)
assert_almost_equal(x, y)
assert_almost_equal(x._data, y._data)
def test_where(self):
"Test the where function"
x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
#
d = where(xm > 2, xm, -9)
assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.])
assert_equal(d._mask, xm._mask)
d = where(xm > 2, -9, ym)
assert_equal(d, [5., 0., 3., 2., -1., -9., -9., -10., -9., 1., 0., -9.])
assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
d = where(xm > 2, xm, masked)
assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.])
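        # expected mask: the original mask plus every position where the condition fails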
tmp = xm._mask.copy()
tmp[(xm <= 2).filled(True)] = True
assert_equal(d._mask, tmp)
#
ixm = xm.astype(int)
d = where(ixm > 2, ixm, masked)
assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
assert_equal(d.dtype, ixm.dtype)
def test_where_with_masked_choice(self):
x = arange(10)
x[3] = masked
c = x >= 8
# Set False to masked
        z = where(c, x, masked)
assert z.dtype is x.dtype
assert z[3] is masked
assert z[4] is masked
assert z[7] is masked
assert z[8] is not masked
assert z[9] is not masked
assert_equal(x, z)
# Set True to masked
        z = where(c, masked, x)
assert z.dtype is x.dtype
assert z[3] is masked
assert z[4] is not masked
assert z[7] is not masked
assert z[8] is masked
assert z[9] is masked
def test_where_with_masked_condition(self):
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert z[0] is masked
assert z[1] is not masked
assert z[2] is masked
#
x = arange(1, 6)
x[-1] = masked
y = arange(1, 6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_equal(z, zm)
assert getmask(zm) is nomask
assert_equal(zm, [1, 2, 3, 40, 50])
z = where(c, masked, 1)
assert_equal(z, [99, 99, 99, 1, 1])
z = where(c, 1, masked)
assert_equal(z, [99, 1, 1, 99, 99])
def test_where_type(self):
"Test the type conservation with where"
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
control = np.find_common_type([np.int32, np.float32], [])
assert_equal(test, control)
def test_choose(self):
"Test choose"
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
chosen = choose([2, 3, 1, 0], choices)
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='clip')
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='wrap')
assert_equal(chosen, array([20, 1, 12, 3]))
# Check with some masked indices
indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([99, 1, 12, 99]))
assert_equal(chosen.mask, [1, 0, 0, 1])
# Check with some masked choices
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([20, 31, 12, 3]))
assert_equal(chosen.mask, [1, 0, 0, 1])
def test_choose_with_out(self):
"Test choose with an explicit out keyword"
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
self.assertTrue(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([99, 31, 12, 99]))
assert_equal(store.mask, [1, 0, 0, 1])
        # Check with some masked choices + out in a ndarray
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
store = empty(4, dtype=int).view(ndarray)
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([999999, 31, 12, 999999]))
def test_reshape(self):
a = arange(10)
a[0] = masked
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
#
c = np.reshape(a, (2, 5))
self.assertTrue(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
self.assertTrue(c[0, 0] is masked)
self.assertTrue(c.flags['C'])
def test_make_mask_descr(self):
"Test make_mask_descr"
# Flexible
ntype = [('a', np.float), ('b', np.float)]
test = make_mask_descr(ntype)
assert_equal(test, [('a', np.bool), ('b', np.bool)])
# Standard w/ shape
ntype = (np.float, 2)
test = make_mask_descr(ntype)
assert_equal(test, (np.bool, 2))
# Standard standard
ntype = np.float
test = make_mask_descr(ntype)
assert_equal(test, np.dtype(np.bool))
# Nested
ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
        # Named + shape
ntype = [('a', (np.float, 2))]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([('a', (np.bool, 2))]))
# 2 names
ntype = [(('A', 'a'), float)]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([(('A', 'a'), bool)]))
def test_make_mask(self):
"Test make_mask"
# w/ a list as an input
mask = [0, 1]
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
mask = np.array([0, 1], dtype=np.bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
        # w/ a flexible-type ndarray as an input - non-boolean fields become booleans
mdtype = [('a', np.float), ('b', np.float)]
bdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
def test_mask_or(self):
# Initialize
mtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
assert_equal(test, mask)
test = mask_or(nomask, mask)
assert_equal(test, mask)
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
# Using True as input. Won't work, but keep it for the kicks
# test = mask_or(mask, True)
# control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype)
# assert_equal(test, control)
        # Using another array w/ the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
        # Using another array w/ a different dtype
othertype = [('A', np.bool), ('B', np.bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
try:
test = mask_or(mask, other)
except ValueError:
pass
# Using nested arrays
dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
assert_equal(mask_or(amask, bmask), cntrl)
def test_flatten_mask(self):
"Tests flatten mask"
        # Standard dtype
mask = np.array([0, 0, 1], dtype=np.bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
test = flatten_mask(mask)
control = np.array([0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
data = [(0, (0, 0)), (0, (0, 1))]
mask = np.array(data, dtype=mdtype)
test = flatten_mask(mask)
control = np.array([ 0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
def test_on_ndarray(self):
"Test functions on ndarrays"
a = np.array([1, 2, 3, 4])
m = array(a, mask=False)
test = anom(a)
assert_equal(test, m.anom())
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
#------------------------------------------------------------------------------
class TestMaskedFields(TestCase):
#
def setUp(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(zip(ilist, flist, slist), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
base = self.data['base']
mdtype = self.data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
def test_set_record_element(self):
"Check setting an element of a record)"
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'two', 'three', 'four', 'five']))
def test_set_record_slice(self):
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'pi', 'pi', 'four', 'five']))
def test_mask_element(self):
"Check record access"
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = masked
#
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
#
def test_getmaskarray(self):
"Test getmaskarray on flexible dtype"
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0) , (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1) , (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
#
def test_view(self):
"Test view w/ flexible dtype"
iterator = zip(np.arange(10), np.random.rand(10))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
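        # masking field 'a' of the first record flags 1 of the 20 flattened values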
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
        # Transform globally to a subdtype (float, 2)
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
#
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
#
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(zip(np.random.rand(10), np.arange(10)), dtype=ndtype)
a.mask = np.array(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0]),
dtype=[('a', bool), ('b', bool)])
# No mask
self.assertTrue(isinstance(a[1], np.void))
# One element masked
self.assertTrue(isinstance(a[0], MaskedArray))
assert_equal_records(a[0]._data, a._data[0])
assert_equal_records(a[0]._mask, a._mask[0])
# All element masked
self.assertTrue(isinstance(a[-2], MaskedArray))
assert_equal_records(a[-2]._data, a._data[-2])
assert_equal_records(a[-2]._mask, a._mask[-2])
#------------------------------------------------------------------------------
class TestMaskedView(TestCase):
#
def setUp(self):
iterator = zip(np.arange(10), np.random.rand(10))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
self.data = (data, a, controlmask)
#
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
#
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
self.assertTrue(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
#
def test_view_to_simple_dtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
#
def test_view_to_flexible_dtype(self):
(data, a, controlmask) = self.data
#
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
#
test = a[0].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
#
test = a[-1].view([('A', float), ('B', float)])
self.assertTrue(not isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
#
def test_view_to_subdtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
self.assertTrue(not isinstance(test, MaskedArray))
assert_equal(test, data[-1])
#
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
#
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
self.assertTrue(not isinstance(test, MaskedArray))
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
###############################################################################
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 3,284,385,971,214,614,000 | 36.629417 | 109 | 0.488747 | false |
siemens/django-dingos-authoring | dingos_authoring/read_settings.py | 1 | 2558 | # Copyright (c) Siemens AG, 2014
#
# This file is part of MANTIS. MANTIS is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2
# of the License, or(at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from django.conf import settings
from django.core.files.storage import FileSystemStorage
import dingos_authoring
if settings.configured and 'DINGOS_AUTHORING' in dir(settings):
dingos_authoring.DINGOS_AUTHORING_IMPORTER_REGISTRY = settings.DINGOS_AUTHORING.get('IMPORTER_REGISTRY', dingos_authoring.DINGOS_AUTHORING_IMPORTER_REGISTRY)
if settings.configured and 'DINGOS_AUTHORING' in dir(settings):
dingos_authoring.DINGOS_AUTHORING_CELERY_BUG_WORKAROUND = settings.DINGOS_AUTHORING.get('CELERY_BUG_WORKAROUND', dingos_authoring.DINGOS_AUTHORING_CELERY_BUG_WORKAROUND)
if settings.configured and 'DINGOS_AUTHORING' in dir(settings):
if not "DATA_FILESYSTEM_ROOT" in settings.DINGOS_AUTHORING:
raise NotImplementedError("Please configure a DATA_FILESYSTEM_ROOT directory in the DINGOS_AUTHORING settings (look "
"at how the MEDIA directory is defined and define an appropriate directory "
"for storing authored data (usually imported XMLs) on the filesystem. "
"Example setting : root('authoring','imports')")
else:
dingos_authoring.DINGOS_AUTHORING_DATA_FILESYSTEM_ROOT = settings.DINGOS_AUTHORING['DATA_FILESYSTEM_ROOT']
dingos_authoring.DINGOS_AUTHORING_DATA_STORAGE = FileSystemStorage(location=dingos_authoring.DINGOS_AUTHORING_DATA_FILESYSTEM_ROOT)
# We do not want the blobs to be directly available via URL.
# Reading the code it seems that setting 'base_url=None' in
# the __init__ arguments does not help, because __init__
# then choses the media URL as default url. So we have
# to set it explicitly after __init__ is done.
dingos_authoring.DINGOS_AUTHORING_DATA_STORAGE.base_url=None | gpl-2.0 | -708,127,362,642,221,300 | 53.446809 | 173 | 0.731431 | false |
rob-earwaker/rail | rail.py | 1 | 7049 | import functools
import inspect
def identity(value):
return value
def not_(value):
return not value
def raise_(exception=None):
if exception is None:
raise
else:
raise exception
def try_(func, handle):
def try_func(arg):
try:
return func(arg)
except Exception as exception:
return handle(exception)
return try_func
class UnmatchedValueError(Exception):
def __init__(self, value):
self.value = value
super().__init__(str(value))
def match(*args):
return lambda value: pipe(
next(
(map_func for is_match, map_func in args if is_match(value)),
lambda _: pipe(value, UnmatchedValueError, raise_)
),
call_with(value)
)
def match_type(*args):
return match(*[
(lambda value, types=types: isinstance(value, types), map_func)
for types, map_func in args
])
def match_length(*args):
return match(*[
(
lambda value, match_len=match_len: pipe(value, len, match_len),
map_func
)
for match_len, map_func in args
])
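def _match_demo():
    # Illustrative sketch (not part of the original module): match_type
    # builds a type-dispatching function from (types, map_func) pairs.
    double_or_len = match_type((int, lambda v: v * 2), (str, len))
    assert double_or_len(3) == 6
    assert double_or_len('abc') == 3
    # Any value matching none of the pairs raises UnmatchedValueError.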
class NamedArg:
NO_VALUE = object()
NO_DEFAULT = object()
def __init__(self, name, default=NO_DEFAULT, value=NO_VALUE):
self.name = name
self.default = default
self.value = value
def has_value(self):
return self.value != NamedArg.NO_VALUE
def has_value_or_default(self):
return self.has_value() or self.default != NamedArg.NO_DEFAULT
def value_or_default(self):
return self.value if self.has_value() else self.default
def with_value(self, value):
return NamedArg(self.name, self.default, value)
class Args:
def __init__(self, named_args, list_args, keyword_args):
self.named_args = named_args
self.list_args = list_args
self.keyword_args = keyword_args
@classmethod
def from_func(cls, func):
return pipe(
inspect.getargspec(func),
lambda argspec: pipe(
argspec.defaults if argspec.defaults is not None else (),
reversed,
list,
lambda rdefaults: pipe(
argspec.args,
reversed,
lambda rargs: [
NamedArg(name, rdefaults[index])
if len(rdefaults) > index else NamedArg(name)
for index, name in enumerate(rargs)
]
)
),
reversed,
list,
lambda named_args: cls(named_args, list_args=(), keyword_args={})
)
def get_named_arg_index(self, is_match):
return pipe(
self.named_args,
lambda args:
(index for index, arg in enumerate(args) if is_match(arg)),
lambda iterator: next(iterator, None)
)
def apply_named_arg(self, index, value):
return pipe(
self.named_args.copy(),
tee(
lambda named_args: pipe(
named_args.pop(index),
lambda arg: named_args.insert(index, arg.with_value(value))
)
),
lambda named_args: Args(
named_args, self.list_args, self.keyword_args.copy()
)
)
def apply_list_arg(self, value):
return pipe(
self.list_args + (value,),
lambda list_args: Args(
self.named_args.copy(), list_args, self.keyword_args.copy()
)
)
def apply_keyword_arg(self, name, value):
return pipe(
self.keyword_args.copy(),
tee(lambda keyword_args: keyword_args.update({name: value})),
lambda keyword_args: Args(
self.named_args.copy(), self.list_args, keyword_args
)
)
def apply_arg(self, value):
return pipe(
self.get_named_arg_index(lambda arg: not arg.has_value()),
lambda index: (
self.apply_named_arg(index, value) if index is not None
else self.apply_list_arg(value)
)
)
def apply_kwarg(self, name, value):
return pipe(
self.get_named_arg_index(lambda arg: arg.name == name),
lambda index: (
self.apply_named_arg(index, value) if index is not None
else self.apply_keyword_arg(name, value)
)
)
def apply_args(self, *args):
return functools.reduce(
lambda args, value: args.apply_arg(value), args, self
)
def apply_kwargs(self, **kwargs):
return functools.reduce(
lambda args, name: args.apply_kwarg(name, kwargs[name]),
kwargs,
self
)
def apply(self, *args, **kwargs):
return self.apply_args(*args).apply_kwargs(**kwargs)
def all_present(self):
return all(arg.has_value_or_default() for arg in self.named_args)
def named_arg_values(self):
return tuple(arg.value_or_default() for arg in self.named_args)
def execute(self, func):
args = self.named_arg_values() + self.list_args
return func(*args, **self.keyword_args)
def partial(func, applied_args=None):
@functools.wraps(func)
def partial_func(*args, **kwargs):
return pipe(
Args.from_func(func) if applied_args is None else applied_args,
lambda existing_args: existing_args.apply(*args, **kwargs),
lambda new_args: (
new_args.execute(func) if new_args.all_present()
else partial(func, new_args)
)
)
return partial_func
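def _partial_demo():
    # Illustrative sketch (not part of the original module): @partial
    # auto-curries until every named argument has a value.
    @partial
    def add3(a, b, c):
        return a + b + c
    assert add3(1)(2)(3) == 6      # one argument at a time
    assert add3(1, 2)(3) == 6      # or in groups
    assert add3(c=3)(1, 2) == 6    # keywords fill named slots early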
def compose(*funcs):
return functools.reduce(
lambda func1, func2: lambda arg: func2(func1(arg)), funcs, identity
)
def pipe(value, *funcs):
func = compose(*funcs)
return func(value)
def tee(*funcs):
return lambda arg: pipe(
arg,
compose(*funcs),
lambda _: arg
)
@partial
def call_with(value, func):
return func(value)
@partial
def lt(value2, value1):
return value1 < value2
@partial
def le(value2, value1):
return value1 <= value2
@partial
def eq(value2, value1):
return value1 == value2
@partial
def ne(value2, value1):
return value1 != value2
@partial
def gt(value2, value1):
return value1 > value2
@partial
def ge(value2, value1):
return value1 >= value2
class Track:
def __init__(self, func=identity):
self.func = func
def __call__(self, arg):
return self.func(arg)
def compose(self, *funcs):
return Track(compose(self.func, *funcs))
def fold(self, success_func, handle_func):
return self.compose(success_func).handle(handle_func)
def handle(self, *funcs):
return Track(try_(self.func, handle=compose(*funcs)))
def tee(self, *funcs):
return self.compose(tee(*funcs))
| mit | -5,303,923,054,830,463,000 | 24.085409 | 79 | 0.555256 | false |
mamchecker/mamchecker | mamchecker/r/i/__init__.py | 1 | 1562 | # -*- coding: utf-8 -*-
import random
from sympy.abc import x
from sympy import log, latex
from mamchecker.hlp import Struct, norm_int as norm
jsFuncs = {'exp': 'return Math.pow(({0}),x-({1}))+({2})',
'log': 'if (x-({0})>0) return Math.log(x-({0}))+({1})',
'pow': 'return ({0})*Math.pow(x-({1}),({2}))+({3})'}
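# For example, jsFuncs['exp'].format(2, 1, 0) yields the JavaScript body
# 'return Math.pow((2),x-(1))+(0)', i.e. f(x) = 2**(x - 1) + 0.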
def given():
# r,i,n,m=143,3,5,50
N = 4
rs = lambda r: random.sample(r, 1)[0]
def gete():
e = e0, e1, e2 = rs([0.2, 0.5, 2, 3]), rs(
[-2, -1, 0, 1, 2]), rs([-2, -1, 0, 1, 2])
ee = e0 ** (x - e1) + e2
jse = jsFuncs['exp'].format(*e)
return (latex(ee), jse)
def getl():
l = l0, l1 = rs([-2, -1, 0, 1, 2]), rs([-2, -1, 0, 1, 2])
el = log(x - l0) + l1
jsl = jsFuncs['log'].format(*l)
return (latex(el), jsl)
def getp():
p = (p0, p1, p2, p3) = (
rs([-2, -1, -1.0 / 2, 1.0 / 2, 1, 2]),
rs([-2, -1, 0, 1, 2]),
rs([-0.2, -0.5, -2, -3, 0.2, 0.5, 2, 3]),
rs([-2, -1, 0, 1, 2]))
ep = p0 * (x - p1) ** p2 + p3
jsp = jsFuncs['pow'].format(*p)
return (latex(ep), jsp)
funcs = []
while len(funcs) < N:
f = rs([gete] * 100 + [getl] * 25 + [getp] * 1200)
while True:
nf = f()
if nf not in funcs:
funcs.append(nf)
break
    order = list(range(len(funcs)))  # list() so shuffle also works on Python 3
    random.shuffle(order)
g = Struct(funcs=funcs, order=order)
return g
def calc(g):
return [o + 1 for o in g.order]
| gpl-3.0 | 5,483,953,586,565,812,000 | 27.4 | 66 | 0.425096 | false |
zambreno/RCL | sccCyGraph/graphs/check.py | 1 | 1207 | #!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by iOsama on 2013-09-24.
Copyright (c) 2013 __MyCompanyName__. All rights reserved.
"""
import sys
import os
inputfile = "reversed.mtx"
outputfile = inputfile + ".out"
x_value = 99999
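# Expected input format (an assumption, inferred from the parsing below):
# a "N M nonZeros" header line followed by one "u v" edge per line, e.g.
#
#   4 4 3
#   1 2
#   2 3
#   3 4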
def main():
# open files
fin = open(inputfile, 'r')
fout = open(outputfile, 'w')
# get graph info
line = fin.readline()
N = line.split(" ")[0]
M = line.split(" ")[1]
nonZeros = line.split(" ")[2]
if x_value > int(N):
print "ERROR: last node exceeds given N!"
exit(0)
# Count updated non-zeros
count = 0
for line in fin:
line = line.split(" ")
u = int(line[0])
v = int(line[1])
if u <= x_value and v <= x_value:
count += 1
# Write updated non-zeros
fout.write(str(x_value) + " " + str(x_value) + " " + str(count) + "\n")
    # Rewind and skip the header line before re-reading the edges
    # (seek(1) would leave the cursor in the middle of the header).
    fin.seek(0)
    fin.readline()
for line in fin:
line = line.split(" ")
u = int(line[0])
v = int(line[1])
if u <= x_value and v <= x_value:
if count > 1:
fout.write(str(u) + " " + str(v) + "\n")
count -= 1
else:
fout.write(str(u) + " " + str(v))
count -= 1
fin.close()
fout.close()
pass
if __name__ == '__main__':
main()
| apache-2.0 | -5,442,214,776,072,603,000 | 17.467742 | 72 | 0.544325 | false |
shawncaojob/LC | PY/361_bomb_enemy.py | 1 | 3162 | # 361. Bomb Enemy Add to List
# DescriptionHintsSubmissionsSolutions
# Total Accepted: 14111
# Total Submissions: 36526
# Difficulty: Medium
# Contributor: LeetCode
# Given a 2D grid, each cell is either a wall 'W', an enemy 'E' or empty '0' (the number zero), return the maximum enemies you can kill using one bomb.
# The bomb kills all the enemies in the same row and column from the planted point until it hits the wall since the wall is too strong to be destroyed.
# Note that you can only put the bomb at an empty cell.
#
# Example:
# For the given grid
#
# 0 E 0 0
# E 0 W E
# 0 E 0 0
#
# return 3. (Placing a bomb at (1,1) kills 3 enemies)
# Credits:
# Special thanks to @memoryless for adding this problem and creating all test cases.
# 2017.05.21
# While scanning, memoize the row hits (rowhits) and per-column hits
# (colhits): they are recomputed only at the first column/row or just
# after a wall, so the whole grid costs O(m * n).
class Solution(object):
def maxKilledEnemies(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if not grid or not grid[0]: return 0
m, n = len(grid), len(grid[0])
res = 0
rowhits, colhits = 0, [0 for j in xrange(n)]
for i in xrange(m):
for j in xrange(n):
if j == 0 or grid[i][j-1] == 'W': # Update rowhits only at first col and after 'W'
rowhits = 0
for k in xrange(j, n):
if grid[i][k] == 'W': break
if grid[i][k] == 'E': rowhits += 1
if i == 0 or grid[i-1][j] == 'W' : # Update colhits only at first row and after 'W'
colhits[j] = 0
for k in xrange(i, m):
if grid[k][j] =="W": break
if grid[k][j] == 'E': colhits[j] += 1
if grid[i][j] == '0':
res = max(res, rowhits + colhits[j])
return res
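# Quick sanity check against the example above (illustrative, not part of
# the original submission):
#   Solution().maxKilledEnemies(["0E00", "E0WE", "0E00"])  # -> 3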
# 2017.05.21
# Brute force: for every empty cell, scan its row and column -- O(m * n * (m + n))
class Solution(object):
def maxKilledEnemies(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if not grid or not grid[0]: return 0
m, n = len(grid), len(grid[0])
res = 0
for i in xrange(m):
for j in xrange(n):
if grid[i][j] == '0':
res = max(res, self.bomb(grid, i, j))
return res
def bomb(self, grid, i, j):
m, n = len(grid), len(grid[0])
cnt = 0
ii, jj = i + 1, j
while ii < m:
if grid[ii][jj] == 'W': break
if grid[ii][jj] == 'E': cnt += 1
ii += 1
ii, jj = i - 1, j
while ii >= 0:
if grid[ii][jj] == 'W': break
if grid[ii][jj] == 'E': cnt += 1
ii -= 1
ii, jj = i, j + 1
while jj < n:
if grid[ii][jj] == 'W': break
if grid[ii][jj] == 'E': cnt += 1
jj += 1
ii, jj = i, j - 1
while jj >= 0:
if grid[ii][jj] == 'W': break
if grid[ii][jj] == 'E': cnt += 1
jj -= 1
return cnt
| gpl-3.0 | -5,668,677,027,563,694,000 | 30.306931 | 151 | 0.463631 | false |
tomo-otsuka/normalize | normalize/__init__.py | 1 | 2300 | #
# This file is a part of the normalize python library
#
# normalize is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
#
# normalize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
#
# You should have received a copy of the MIT license along with
# normalize. If not, refer to the upstream repository at
# http://github.com/hearsaycorp/normalize
#
from __future__ import absolute_import
from normalize.coll import DictCollection
from normalize.coll import ListCollection
import normalize.exc as exc
from normalize.property import LazyProperty
from normalize.property import LazySafeProperty
from normalize.property import make_property_type
from normalize.property import Property
from normalize.property import ROProperty
from normalize.property import SafeProperty
from normalize.property.coll import ListProperty
from normalize.property.json import JsonProperty
from normalize.property.json import JsonListProperty
from normalize.property.json import JsonCollectionProperty
from normalize.property.json import SafeJsonProperty
from normalize.record import Record
from normalize.record.meta import RecordMeta
from normalize.record.json import from_json
from normalize.record.json import JsonRecord
from normalize.record.json import JsonRecordList
from normalize.record.json import to_json
from normalize.selector import FieldSelector
from normalize.selector import FieldSelectorException
from normalize.selector import MultiFieldSelector
RecordList = ListCollection
JsonCollection = ListCollection
__all__ = [
"DictCollection",
"exc",
"FieldSelector",
"FieldSelectorException",
"from_json",
"JsonCollection", # deprecated - use JsonRecordList
"JsonCollectionProperty", # deprecated
"JsonListProperty",
"JsonProperty",
"JsonRecord",
"JsonRecordList",
"LazyProperty",
"LazySafeProperty",
"ListCollection",
"ListProperty",
"make_property_type",
"MultiFieldSelector",
"Property",
"ROProperty",
"Record",
"RecordList",
"RecordMeta",
"SafeJsonProperty",
"SafeProperty",
"to_json",
]
| mit | -5,659,478,559,726,994,000 | 30.081081 | 67 | 0.777826 | false |
knnniggett/weewx | bin/weewx/__init__.py | 1 | 4230 | #
# Copyright (c) 2009-2015 Tom Keffer <[email protected]>
#
# See the file LICENSE.txt for your full rights.
#
"""Package weewx, containing modules specific to the weewx runtime engine."""
import time
__version__="3.2.0a1"
# Holds the program launch time in unix epoch seconds:
# Useful for calculating 'uptime.'
launchtime_ts = time.time()
# Set to true for extra debug information:
debug = False
# Exit return codes
CMD_ERROR = 2
CONFIG_ERROR = 3
IO_ERROR = 4
DB_ERROR = 5
# Constants used to indicate a unit system:
METRIC = 0x10
METRICWX = 0x11
US = 0x01
#===============================================================================
# Define possible exceptions that could get thrown.
#===============================================================================
class WeeWxIOError(IOError):
"""Base class of exceptions thrown when encountering an I/O error with the console."""
class WakeupError(WeeWxIOError):
"""Exception thrown when unable to wake up or initially connect with the console"""
class CRCError(WeeWxIOError):
"""Exception thrown when unable to pass a CRC check."""
class RetriesExceeded(WeeWxIOError):
"""Exception thrown when max retries exceeded."""
class HardwareError(StandardError):
"""Exception thrown when an error is detected in the hardware."""
class UnknownArchiveType(HardwareError):
"""Exception thrown after reading an unrecognized archive type."""
class UnsupportedFeature(StandardError):
"""Exception thrown when attempting to access a feature that is not supported (yet)."""
class ViolatedPrecondition(StandardError):
"""Exception thrown when a function is called with violated preconditions."""
class StopNow(StandardError):
"""Exception thrown to stop the engine."""
class UninitializedDatabase(StandardError):
"""Exception thrown when attempting to use an uninitialized database."""
class UnknownDatabase(StandardError):
"""Exception thrown when attempting to use an unknown database."""
class UnknownBinding(StandardError):
"""Exception thrown when attempting to use an unknown data binding."""
class UnitError(ValueError):
"""Exception thrown when there is a mismatch in unit systems."""
#===============================================================================
# Possible event types.
#===============================================================================
class STARTUP(object):
"""Event issued when the engine first starts up. Services have not been loaded."""
class PRE_LOOP(object):
"""Event issued just before the main packet loop is started. Services have been loaded."""
class NEW_LOOP_PACKET(object):
"""Event issued when a new LOOP packet is available. The event contains attribute 'packet',
which is the new LOOP packet."""
class CHECK_LOOP(object):
"""Event issued in the main loop, right after a new LOOP packet has been processed. Generally,
it is used to throw an exception, breaking the main loop, so the console can be used
for other things."""
class END_ARCHIVE_PERIOD(object):
"""Event issued at the end of an archive period."""
class NEW_ARCHIVE_RECORD(object):
"""Event issued when a new archive record is available. The event contains attribute 'record',
which is the new archive record."""
class POST_LOOP(object):
"""Event issued right after the main loop has been broken. Services hook into this to
access the console for things other than generating LOOP packet."""
#===============================================================================
# Class Event
#===============================================================================
class Event(object):
"""Represents an event."""
def __init__(self, event_type, **argv):
self.event_type = event_type
for key in argv:
setattr(self, key, argv[key])
def __str__(self):
"""Return a string with a reasonable representation of the event."""
et = "Event type: %s | " % self.event_type
s = "; ".join("%s: %s" %(k, self.__dict__[k]) for k in self.__dict__ if k!="event_type")
return et + s
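# A minimal usage sketch (not part of weewx itself); the 'packet' attribute
# name matches the NEW_LOOP_PACKET docstring above:
def _event_example():
    event = Event(NEW_LOOP_PACKET, packet={'outTemp': 21.5})
    return str(event)  # "Event type: <class ...NEW_LOOP_PACKET...> | packet: ..."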
| gpl-3.0 | -6,725,898,098,226,328,000 | 37.108108 | 98 | 0.613475 | false |
vzantedeschi/L3SVMs | src/l3svms.py | 1 | 2823 | import time
from liblinearutil import *
from sklearn import cluster
from src.landmark import *
from src.projection import *
def learning(train_x,train_y,test_x,test_y,printf=print,CLUS=1,PCA_BOOL=False,LIN=True,LAND=10):
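    """Summary of the pipeline below (added description, not original docs):
    optionally cluster with MiniBatchKMeans (CLUS > 1), pick LAND landmarks
    (PCA-based or random), project both sets onto them (linear or, when
    non-linear, gamma-parameterized), tune C (and gamma) via liblinear's
    -C option, then train an L2-regularized SVC (-s 2) and report timing
    and accuracy.
    """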
t2 = time.time()
if CLUS > 1:
# get clusterer
clusterer = cluster.MiniBatchKMeans(n_clusters=CLUS)
train_clusters = clusterer.fit(train_x).labels_
test_clusters = clusterer.predict(test_x)
else:
clusterer = None
train_clusters = None
test_clusters = None
t3 = time.time()
printf("clustering time:",t3-t2,"s")
# select landmarks
if PCA_BOOL:
landmarks = pca_landmarks(train_x.toarray(),LAND)
else:
landmarks = random_landmarks(train_x,LAND)
    # unit vectors for a centered kernel (unused here, hence None)
    u = None
t2 = time.time()
printf("landmarks selection time:",t2-t3,"s")
t2 = time.time()
# project data
# tr_x = project(train_x,landmarks,clusters=train_clusters,unit_vectors=u,linear=LIN)
tr_x = parallelized_projection(-1,train_x,landmarks,clusters=train_clusters,unit_vectors=u,linear=LIN)
t3 = time.time()
printf("projection time:",t3-t2,"s")
t3 = time.time()
# tuning
if LIN:
tr_x = parallelized_projection(-1,train_x,landmarks,clusters=train_clusters,unit_vectors=u,linear=LIN)
best_C,_,_ = train(train_y, tr_x, '-C -s 2 -B 1 -q')
best_G = None
else:
best_G,best_C,best_acc = 0,0,0
for g in [10**i for i in range(-3,3)]:
tr_x = parallelized_projection(-1,train_x,landmarks,clusters=train_clusters,unit_vectors=u,linear=LIN,gamma=g)
c,_,score = train(train_y, tr_x, '-C -s 2 -B 1 -q')
if score > best_acc:
best_C = c
best_G = g
best_acc = score
tr_x = parallelized_projection(-1,train_x,landmarks,clusters=train_clusters,unit_vectors=u,linear=LIN,gamma=best_G)
print("Best C =",best_C)
print("Best Gamma =",best_G,"\n")
t4 = time.time()
printf("tuning time:",t4-t3,"s")
# training
model = train(train_y, tr_x, '-c {} -s 2 -B 1 -q'.format(best_C))
assert model.nr_feature == LAND*CLUS
t5 = time.time()
printf("training time:",t5-t4,"s")
te_x = parallelized_projection(-1,test_x,landmarks,clusters=test_clusters,unit_vectors=u,linear=LIN,gamma=best_G)
# te_x = project(test_x,landmarks,clusters=test_clusters,unit_vectors=u,linear=LIN,gamma=best_G)
p_label,p_acc,p_val = predict(test_y, te_x, model)
t6 = time.time()
printf("testing time:",t6-t5,"s")
printf("iteration results: (accuracy,mean squared error,squared correlation coefficient), learning time")
printf(evaluations(test_y,p_label),t6-t2)
print("-------------------\n")
return p_acc[0],t6-t5 | mit | 5,179,547,013,448,826,000 | 32.223529 | 123 | 0.617783 | false |
tln/tatl | tatlrt.py | 1 | 14490 | # TATL runtime lib
import json, re
from warnings import warn
try: unicode
except:
# Python 3
unicode = basestring = str
apply = lambda f, args=(), kw={}: f(*args, **kw)
# Define some of the TATL built-ins. Compiler uses __all__ to determine whether name refers to a
# built-in.
null = None
false = False
true = True
len = len
__all__ = ['len', 'true', 'false', 'null']
def public(obj):
"Mark a class or function as public (aka a builtin, available from TATL templates)"
__all__.append(obj.__name__)
return obj
# A namespace of filters.
@apply
@public
class filters:
def _add(self, fn, _alias=re.compile('Alias: (\w+)')):
"""Mark a function as a filter. Include Alias: name in the docstring
to make a shortened alias.
Also add logic such that if used in def="" context (ie, given a function),
it will return a wrapper.
eg filters.trim(" s") -> "s"
filters.trim(func)(...) -> filters.trim(func(...))
"""
def f(arg, *args, **kw):
if callable(arg) and not (args or kw):
return lambda *args, **kw: fn(arg(*args, **kw))
else:
return fn(arg, *args, **kw)
name = f.__name__ = fn.__name__
doc = f.__doc__ = fn.__doc__
setattr(self, name, f)
for alias in _alias.findall(doc or ''):
setattr(self, alias, f)
return fn
# Marker for safe strings
@filters._add
class safe(unicode): "Quoted strings are 'safe' and do not get quoted again."
# Buffer logic, use fastbuf if available or lists if not
# This can be turned off/on at runtime, to enable testing using both paths
def use_fast(flag):
"Turn on fast mode, if possible. Return whether fast mode is in use."
global Buf, join, safejoin, fast
if flag:
try:
from fastbuf import Buf, set_safe_class
set_safe_class(safe)
join = unicode
safejoin = safe
fast = True
return True
except ImportError:
pass
def Buf():
return [].append
def join(b):
return u''.join(b.__self__)
def safejoin(b):
return safe(join(b))
fast = False
return False
use_fast(True)
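# A minimal usage sketch of the buffer API (illustrative; assumes the
# pure-Python fallback, where Buf() is a bound list.append):
def _buf_demo():
    emit = Buf()
    emit(u'hello, ')
    emit(u'world')
    return join(emit)  # u'hello, world'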
# Quoting / escaping logic.
# Quote occurs through a type-switch mechanism which is faster than if isinstance chains.
_quote_safe = lambda s: s
def _quote_str(o):
"""Escape a str/unicode object. Note that compiled code never uses ' for attributes and >
doesn't needed to be escaped to form valid HTML. These replace calls are a big cost,
so saving 40% of them is a win.
"""
return o.replace(u'&', u'&')\
.replace(u'<', u'<')\
.replace(u"'", u''')
def _quote_other(o, q=_quote_str):
"""Escape a non-basestring, non-unicode, non-number, non-bool, non-null object.
Lists are space separated, dictionaries are repr-ed
"""
if isinstance(o, (tuple, list)):
return q(' '.join(map(unicode, o)))
return q(unicode(o))
class _Context(object):
"Context object, created for each TATL macro"
# Define type-switches for quoting
q_def = {
int: unicode,
float: '%.16g'.__mod__,
safe: _quote_safe,
}
q = {
'none': (json.dumps, {
str: str,
unicode: unicode,
}),
'attr': (_quote_other, {
str: _quote_str,
unicode: _quote_str,
}),
}
quote = None
def __init__(self, ctxname):
self.qstack = [0] # track whether .quote has been called with an empty value
self.mkquote(ctxname)
def mkquote(self, ctxname):
# Build a quoting function from type switches
from collections import defaultdict
default, typesdict = self.q[ctxname]
d = defaultdict(lambda:default, self.q_def)
d.update(typesdict)
d[None.__class__] = self._none
d[bool] = self._bool
self.quote = lambda obj: d[obj.__class__](obj)
def _none(self, arg):
self.qstack[-1] = 1
return ''
def _bool(self, arg):
if arg: return 'true'
self.qstack[-1] = 1
return ''
    def star(self):
        # NB broken: relies on self.estack and self.push, which this
        # class never defines.
        return _Star(self.estack[-1], self.quote), + self.push()
    def plusplus(self):
        # NB broken: see star() above.
        return _Plusplus(self.estack[-1]), + self.push()
def elidestart(self):
self.qstack.append(0)
return Buf()
def elidecheck(self, emit):
checkresult = not (getattr(emit, 'blank_flag', 0) or self.qstack.pop())
return checkresult, safejoin(emit)
def load(self, name, path):
o = __import__(name) # TODO we need a whitelist here
o = getattr(o, path.pop(0)) # error if first name not found
return self.get(o, path)
def get(self, o, path):
for p in path:
o = self.get1(o, p)
return o
def applyauto(self, func, locals):
if isinstance(func, (_Star, _Plusplus)):
argnames = ['dot']
else:
co = func.__code__
argnames = co.co_varnames[:co.co_argcount]
args = [locals.get(a) for a in argnames]
result = func(*args)
return result or ''
def applyargs(self, func, *args):
result = func(*args)
return result or ''
def items(self, obj):
if obj is None:
return ()
try:
m = obj.items
except AttributeError:
return enumerate(obj)
else:
return sorted(m())
def itemsUnsorted(self, obj):
if obj is None:
return ()
try:
m = obj.items
except AttributeError:
return enumerate(obj)
else:
return m()
def iter(self, obj):
if obj is None or obj == '':
return []
elif isinstance(obj, basestring):
return [obj]
else:
return obj
def search(self, pattern, object):
if isinstance(object, basestring):
return re.search(pattern, object) is not None
return False
def range_incl(self, n, m):
# Implement n...m logic.
return range(n, m+1) if n < m else range(n, m-1, -1)
def range_excl(self, n, m):
# Implement n..m logic.
return range(n, m) if n < m else range(n-1, m-1, -1)
    def get1(self, o, p):
        "Implement path lookup, both {o.p} and {o[p]}"
        try:
            return o[p]
        except (TypeError, KeyError, IndexError, AttributeError):
            if not isinstance(p, basestring): return None
            try:
                return getattr(o, p, None)
            except Exception as e:
                warn("Unexpected error getting %r[%r]: %s" % (o, p, e))
        except Exception as e:
            # The original fell through to warn() with `e` unbound,
            # which raised NameError; warn inside the handler instead.
            warn("Unexpected error getting %r[%r]: %s" % (o, p, e))
        return None
def ctx(name):
c = _Context(name)
return c, c.quote
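def _ctx_demo():
    # Illustrative sketch (not part of the runtime): quoting in the
    # 'attr' context escapes &, < and ' but trusts `safe` strings, and
    # space-joins lists.
    _, quote = ctx('attr')
    assert quote(u"a & 'b'") == u"a &amp; &#39;b&#39;"
    assert quote(safe(u'<b>')) == u'<b>'
    assert quote([1, 2]) == u'1 2'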
# Used elsewhere in tatl for quoting
_attr = _Context('attr')
# Used to implement {*:x}
class _Star:
def __init__(self, l, quote):
self._l = l
self._len = len(l)
self._sp = 0
self._quote = quote
def __call__(self, o):
s = self._quote(o)
if s:
if self._sp:
s = ' '+s
self._sp = s[-1:] not in ' \n'
self._l.append(s)
return o
def __unicode__(self):
return ''.join(self._l[self._len:])
def __getitem__(self, i):
return self._l[i + self._len]
def __len__(self):
return len(self._l) - self._len
# Used to implement {++:x}
class _Plusplus:
def __init__(self, l):
self._l = l
self._ix = len(l)
l.append('0')
self.cur = 0
def __call__(self, value=""):
if value or value == "":
self.cur += 1
self._l[self._ix] = str(self.cur)
return ''
def __unicode__(self):
return unicode(self.cur)
def __cmp__(self, other):
return cmp(self.cur, other)
def __int__(self):
return self.cur
# forloop, swiss army knife of looping
class _Forloop(object):
length = 0
counter0 = None
key = None
value = None
sum = None
pre = False
post = False
prev = None
next = None
counter = property(lambda self: None if self.counter0 is None else self.counter0 + 1)
first = property(lambda self: self.counter0 == 0)
last = property(lambda self: self.counter == self.length)
def __init__(self, length, cycle=[], firstclass='first', lastclass='last', preclass='', postclass='', **opts):
self.length = length
self.cycle = cycle
self.firstclass = firstclass
self.lastclass = lastclass
self.preclass = preclass
self.postclass = postclass
def classes(self):
l = []
if self.preclass and self.pre:
l.append(self.preclass)
if self.firstclass and self.first:
l.append(self.firstclass)
if self.cycle:
l.append(self.cycle[self.counter0 % len(self.cycle)])
if self.lastclass and self.last:
l.append(self.lastclass)
if self.postclass and self.post:
l.append(self.postclass)
return ' '.join(l)
def make_next(self):
next = self.__class__(
self.length,
self.cycle,
self.firstclass,
self.lastclass,
self.preclass,
self.postclass
)
self.next = next
next.prev = self
return next
def __repr__(self):
result = '<forloop:'
for k, v in self.__dict__.items():
if k in ('prev', 'next', 'cycle') or k.endswith('class'): continue
result += ' %s=%r' % (k, v)
return result + ' classes=%r>' % self.classes()
@public
def forloop(obj, opts={}):
"Support forloop.counter, etc"
#forloop [pre] should have counter = counter0 = key = value = null
if obj is None:
return
if isinstance(obj, basestring):
obj = [obj]
agg = opts.pop('total', None)
agg = agg and Aggregator(agg)
result = _Forloop(len(obj), **opts)
if bool(result.preclass):
result.pre = True
lastresult = result
result = result.make_next()
else:
lastresult = None
for result.counter0, (result.key, result.value) in enumerate(_attr.items(obj)):
if agg: agg(result.value)
if lastresult:
yield lastresult
lastresult = result
result = result.make_next()
if lastresult:
lastresult.next = None
yield lastresult
if result.postclass or agg:
result.prev = None
result.post = True
result.key = opts.get('totalkey')
result.value = agg and agg.value()
yield result
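def _forloop_demo():
    # Illustrative sketch (not part of the original runtime): forloop
    # yields helpers exposing counter, value and first/last CSS classes.
    out = [(loop.counter, loop.value, loop.classes())
           for loop in forloop([u'a', u'b', u'c'])]
    assert out == [(1, u'a', 'first'), (2, u'b', ''), (3, u'c', 'last')]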
@public
def sum(values, _builtin=sum):
try:
values = map(float, values)
except:
return None
return _builtin(values)
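# For example, sum([u'1', u'2', u'3.5']) returns 6.5, while any
# non-numeric element (e.g. sum([u'1', u'x'])) yields None.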
class Aggregator:
def __init__(self, aggregators):
if callable(aggregators):
self.aggfn = aggregators
self.consts = self.aggfns = {}
self.has_aggs = True
self.values = []
else:
l = [{}, {}]
self.aggfn = None
self.aggfns = l[True]
self.consts = l[False]
for k, v in aggregators.items():
l[callable(v)][k] = v
self.has_aggs = bool(self.aggfns or self.consts)
self.values = dict((k, []) for k in self.aggfns)
def __call__(self, value):
if not self.has_aggs: return
if self.aggfn:
self.values.append(value)
else:
for key in self.aggfns:
self.values[key].append(_attr.get1(value, key))
def value(self):
if not self.has_aggs:
return None
if self.aggfn:
return self.aggfn(self.values)
d = self.consts.copy()
for key, fn in self.aggfns.items():
d[key] = fn(self.values[key])
return d
# Additional filters
@filters._add
def url(s):
"Alias: u"
    try:
        from urllib import quote  # Python 2
    except ImportError:  # Python 3
        from urllib.parse import quote
    return quote(s)
def tostr(s):
"Convert object to string with same semantics as default context"
if s is None:
return ''
if isinstance(s, basestring):
return s
if isinstance(s, float):
return '%.16g' % s
return unicode(s)
@filters._add
def trim(s):
"A filter"
return tostr(s).strip()
TAG = re.compile('(\s*<)([a-zA-Z0-9_.:-]+)(.*?>)', re.DOTALL)
# Tag-oriented filters
def _findtag(s, fn):
if not isinstance(s, basestring): return s
start = m = TAG.match(s)
if not m: return s
count = 1
p = re.compile('<(/?)%s\s*' % start.group(2))
while count:
m = p.search(s, m.end())
if not m: return s
count += -1 if m.group(1) else 1
if s[m.end()+1:].strip(): return s
return fn(s, start, m)
@public
def contents(inner):
"""
>>> contents(u' <title>HI</title> ')
u'HI'
>>> contents(u'<p>1</p><p>2</p>')
u'<p>1</p><p>2</p>'
>>> contents(u'<p><p>1</p><p>2</p></p>')
u'<p>1</p><p>2</p>'
"""
return safe(_findtag(inner, lambda s, start, end: s[start.end():end.start()]))
notpassed = object()
@public
def tag(tagname, attrs_or_inner, inner=notpassed):
"""
>>> tag('h1', {}, u'HI')
u'<h1>HI</h1>'
>>> tag('h1', {}, u'H&I')
u'<h1>H&I</h1>'
>>> tag('h1', None, safe(u'<title>HI</title>'))
u'<h1><title>HI</title></h1>'
>>> tag('h1', {'class': 'large'}, safe(u'foo:<title>HI</title>'))
u'<h1 class="large">foo:<title>HI</title></h1>'
"""
if inner is notpassed:
attstr = ''
inner = attrs_or_inner
else:
attrs = attrs_or_inner or {}
attstr = ''.join(
' %s="%s"' % (k, _attr.quote(v))
for k, v in sorted(attrs.items())
)
return safe(u'<%s>%s</%s>' % (tagname+attstr, _attr.quote(inner), tagname))
@public
def attrs(attrs, inner):
"""
>>> attrs({'id':'id123'}, u'<title>HI</title>')
u'<title id="id123">HI</title>'
"""
def _replace(s, start, end):
attstr = ''.join(' %s="%s"' % (k, _attr.quote(v)) for k, v in attrs.items())
e = start.end(2)
return s[:e]+attstr+s[e:]
return safe(_findtag(inner, _replace))
if __name__ == '__main__':
import doctest
doctest.testmod()
| bsd-2-clause | -4,284,716,867,018,305,000 | 26.705545 | 114 | 0.536508 | false |
trustyou/tyluigiutils | setup.py | 1 | 1848 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import os.path
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
def read(fname):
with open(fname) as fp:
content = fp.read()
return content
REQUIREMENTS_FOLDER = os.getenv('REQUIREMENTS_PATH', '')
requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements.txt"), 'r')]
test_requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements_dev.txt"), 'r')]
setup_requirements = [
'pytest-runner',
]
# NOTE: this overrides the list read from requirements_dev.txt above.
test_requirements = [
'pytest',
# TODO: put package test requirements here
]
setup(
name='tyluigiutils',
version='0.2.0',
description="Misc Luigi related code used by TrustYou ",
long_description=readme + '\n\n' + history,
author="Miguel Cabrera",
author_email='[email protected]',
url='https://github.com/mfcabrera/tyluigiutils',
packages=find_packages('.'),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='tyluigiutils',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| mit | 1,872,680,828,354,485,800 | 27 | 115 | 0.643398 | false |
tombstone/models | research/object_detection/builders/hyperparams_builder_test.py | 1 | 33333 | # Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests object_detection.core.hyperparams_builder."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
from object_detection.utils import tf_version
def _get_scope_key(op):
return getattr(op, '_key_op', str(op))
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests.')
class HyperparamsBuilderTest(tf.test.TestCase):
def test_default_arg_scope_has_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.conv2d), scope)
def test_default_arg_scope_has_separable_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.separable_conv2d), scope)
def test_default_arg_scope_has_conv2d_transpose_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.conv2d_transpose), scope)
def test_explicit_fc_op_arg_scope_has_fully_connected_op(self):
conv_hyperparams_text_proto = """
op: FC
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.fully_connected), scope)
def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
kwargs_1, kwargs_2, kwargs_3 = scope.values()
self.assertDictEqual(kwargs_1, kwargs_2)
self.assertDictEqual(kwargs_1, kwargs_3)
def test_return_l1_regularized_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = list(scope.values())[0]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l2_regularizer_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_non_default_batch_norm_params_with_train_during_train(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertTrue(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_during_eval(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=False)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_when_train_is_false(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: false
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_do_not_use_batch_norm_if_default(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], None)
def test_use_none_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], None)
def test_use_relu_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu)
def test_use_relu_6_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6)
def test_use_swish_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: SWISH
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.swish)
def _assert_variance_in_range(self, initializer, shape, variance,
tol=1e-2):
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
var = tf.get_variable(
name='test',
shape=shape,
dtype=tf.float32,
initializer=initializer)
sess.run(tf.global_variables_initializer())
values = sess.run(var)
self.assertAllClose(np.var(values), variance, tol, tol)
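  # Note: for variance_scaling_initializer the expected variance is
  # factor / n, with n = fan_in, fan_out, or (fan_in + fan_out) / 2
  # depending on `mode`; e.g. shape [100, 40] with factor 2.0 gives
  # 2/100 (FAN_IN), 2/40 (FAN_OUT) and 4/140 (FAN_AVG), as asserted below.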
def test_variance_in_range_with_variance_scaling_initializer_fan_in(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_uniform(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_truncated_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
def test_variance_in_range_with_random_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.64, tol=1e-1)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only tests.')
class KerasHyperparamsBuilderTest(tf.test.TestCase):
def _assert_variance_in_range(self, initializer, shape, variance,
tol=1e-2):
var = tf.Variable(initializer(shape=shape, dtype=tf.float32))
self.assertAllClose(np.var(var.numpy()), variance, tol, tol)
def test_return_l1_regularized_weights_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer = keras_config.params()['kernel_regularizer']
weights = np.array([1., -1, 4., 2.])
result = regularizer(tf.constant(weights)).numpy()
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l2_regularizer_weights_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer = keras_config.params()['kernel_regularizer']
weights = np.array([1., -1, 4., 2.])
result = regularizer(tf.constant(weights)).numpy()
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_non_default_batch_norm_params_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertAlmostEqual(batch_norm_params['momentum'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
batch_norm_layer = keras_config.build_batch_norm()
self.assertIsInstance(batch_norm_layer,
freezable_batch_norm.FreezableBatchNorm)
def test_return_non_default_batch_norm_params_keras_override(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params(momentum=0.4)
self.assertAlmostEqual(batch_norm_params['momentum'], 0.4)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
def test_do_not_use_batch_norm_if_default_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertFalse(keras_config.use_batch_norm())
self.assertEqual(keras_config.batch_norm_params(), {})
# The batch norm builder should build an identity Lambda layer
identity_layer = keras_config.build_batch_norm()
self.assertIsInstance(identity_layer,
tf.keras.layers.Lambda)
def test_do_not_use_bias_if_batch_norm_center_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: true
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertTrue(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
hyperparams = keras_config.params()
self.assertFalse(hyperparams['use_bias'])
def test_force_use_bias_if_batch_norm_center_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: true
scale: true
epsilon: 0.03
train: true
}
force_use_bias: true
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertTrue(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
hyperparams = keras_config.params()
self.assertTrue(hyperparams['use_bias'])
def test_use_none_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertIsNone(
keras_config.params(include_activation=True)['activation'])
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.identity)
def test_use_relu_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.relu)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.relu)
def test_use_relu_6_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.relu6)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.relu6)
def test_use_swish_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: SWISH
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.swish)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.swish)
def test_override_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
new_params = keras_config.params(activation=tf.nn.relu)
self.assertEqual(new_params['activation'], tf.nn.relu)
def test_variance_in_range_with_variance_scaling_initializer_fan_in_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_avg_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_uniform_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_truncated_normal_initializer_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
def test_variance_in_range_with_random_normal_initializer_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.64, tol=1e-1)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 1,661,032,376,450,685,000 | 33.117707 | 80 | 0.624906 | false |
bajibabu/merlin | src/run_tensorflow_with_merlin_io.py | 1 | 12293 | ################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://github.com/CSTR-Edinburgh/merlin
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import os
import sys
import time
import tensorflow as tf
from tensorflow_lib import configuration
from tensorflow_lib import data_utils
from tensorflow_lib.train import TrainTensorflowModels,Train_Encoder_Decoder_Models
class TensorflowClass(object):
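    #driver for Merlin's Tensorflow back-end: reads the config, builds the
    #train/valid/test file lists, then normalizes data, trains and/or tests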
def __init__(self, cfg):
###################################################
########## User configurable variables ############
###################################################
inp_feat_dir = cfg.inp_feat_dir
out_feat_dir = cfg.out_feat_dir
pred_feat_dir = cfg.pred_feat_dir
inp_file_ext = cfg.inp_file_ext
out_file_ext = cfg.out_file_ext
### Input-Output ###
self.inp_dim = cfg.inp_dim
self.out_dim = cfg.out_dim
self.inp_norm = cfg.inp_norm
self.out_norm = cfg.out_norm
self.inp_stats_file = cfg.inp_stats_file
self.out_stats_file = cfg.out_stats_file
self.inp_scaler = None
self.out_scaler = None
#### define model params ####
self.hidden_layer_type = cfg.hidden_layer_type
self.hidden_layer_size = cfg.hidden_layer_size
self.sequential_training = cfg.sequential_training
self.encoder_decoder = cfg.encoder_decoder
self.attention = cfg.attention
self.cbhg = cfg.cbhg
self.batch_size = cfg.batch_size
self.shuffle_data = cfg.shuffle_data
self.output_layer_type = cfg.output_layer_type
self.loss_function = cfg.loss_function
self.optimizer = cfg.optimizer
self.rnn_params = cfg.rnn_params
self.dropout_rate = cfg.dropout_rate
self.num_of_epochs = cfg.num_of_epochs
### Define the work directory###
self.model_dir = cfg.model_dir
### define train, valid, test ###
train_file_number = cfg.train_file_number
valid_file_number = cfg.valid_file_number
test_file_number = cfg.test_file_number
file_id_scp = cfg.file_id_scp
test_id_scp = cfg.test_id_scp
        #### main processes ####
self.NORMDATA = cfg.NORMDATA
self.TRAINMODEL = cfg.TRAINMODEL
self.TESTMODEL = cfg.TESTMODEL
#### Generate only test list ####
self.GenTestList = cfg.GenTestList
###################################################
####### End of user-defined conf variables ########
###################################################
#### Create train, valid and test file lists ####
file_id_list = data_utils.read_file_list(file_id_scp)
train_id_list = file_id_list[0: train_file_number]
valid_id_list = file_id_list[train_file_number: train_file_number + valid_file_number]
test_id_list = file_id_list[train_file_number + valid_file_number: train_file_number + valid_file_number + test_file_number]
valid_test_id_list = file_id_list[train_file_number: train_file_number + valid_file_number + test_file_number]
self.inp_train_file_list = data_utils.prepare_file_path_list(train_id_list, inp_feat_dir, inp_file_ext)
self.out_train_file_list = data_utils.prepare_file_path_list(train_id_list, out_feat_dir, out_file_ext)
self.inp_valid_file_list = data_utils.prepare_file_path_list(valid_id_list, inp_feat_dir, inp_file_ext)
self.out_valid_file_list = data_utils.prepare_file_path_list(valid_id_list, out_feat_dir, out_file_ext)
self.inp_test_file_list = data_utils.prepare_file_path_list(valid_test_id_list, inp_feat_dir, inp_file_ext)
self.out_test_file_list = data_utils.prepare_file_path_list(valid_test_id_list, out_feat_dir, out_file_ext)
self.gen_test_file_list = data_utils.prepare_file_path_list(valid_test_id_list, pred_feat_dir, out_file_ext)
if self.GenTestList:
test_id_list = data_utils.read_file_list(test_id_scp)
self.inp_test_file_list = data_utils.prepare_file_path_list(test_id_list, inp_feat_dir, inp_file_ext)
self.gen_test_file_list = data_utils.prepare_file_path_list(test_id_list, pred_feat_dir, out_file_ext)
if not self.encoder_decoder:
self.tensorflow_models = TrainTensorflowModels(self.inp_dim, self.hidden_layer_size, self.out_dim, self.hidden_layer_type, self.model_dir,
output_type=self.output_layer_type, dropout_rate=self.dropout_rate,
loss_function=self.loss_function, optimizer=self.optimizer)
else:
self.encoder_decoder_models = Train_Encoder_Decoder_Models(self.inp_dim,self.hidden_layer_size,self.out_dim,self.hidden_layer_type,output_type=self.output_layer_type,\
dropout_rate=self.dropout_rate,loss_function=self.loss_function,optimizer=self.optimizer,\
attention=self.attention,cbhg=self.cbhg)
def normlize_data(self):
### normalize train data ###
if os.path.isfile(self.inp_stats_file) and os.path.isfile(self.out_stats_file):
self.inp_scaler = data_utils.load_norm_stats(self.inp_stats_file, self.inp_dim, method=self.inp_norm)
self.out_scaler = data_utils.load_norm_stats(self.out_stats_file, self.out_dim, method=self.out_norm)
else:
print('preparing train_x, train_y from input and output feature files...')
train_x, train_y, train_flen = data_utils.read_data_from_file_list(self.inp_train_file_list, self.out_train_file_list,\
self.inp_dim, self.out_dim, sequential_training=True if self.sequential_training or self.encoder_decoder else False)
print('computing norm stats for train_x...')
inp_scaler = data_utils.compute_norm_stats(train_x, self.inp_stats_file, method=self.inp_norm)
print('computing norm stats for train_y...')
out_scaler = data_utils.compute_norm_stats(train_y, self.out_stats_file, method=self.out_norm)
def train_tensorflow_model(self):
print('preparing train_x, train_y from input and output feature files...')
#### load the data ####
train_x, train_y, train_flen = data_utils.read_data_from_file_list(self.inp_train_file_list, self.out_train_file_list,
self.inp_dim, self.out_dim, sequential_training=True if self.sequential_training or self.encoder_decoder else False)
#### normalize the data ####
data_utils.norm_data(train_x, self.inp_scaler, sequential_training=True if self.sequential_training or self.encoder_decoder else False)
data_utils.norm_data(train_y, self.out_scaler, sequential_training=True if self.sequential_training or self.encoder_decoder else False)
#### define the model ####
if self.sequential_training:
utt_length=train_flen["utt2framenum"].values()
self.tensorflow_models.get_max_step(max(utt_length))
self.tensorflow_models.define_sequence_model()
elif self.encoder_decoder:
utt_length=train_flen["utt2framenum"].values()
super(Train_Encoder_Decoder_Models,self.encoder_decoder_models).__setattr__("max_step",max(utt_length))
self.encoder_decoder_models.define_encoder_decoder()
else:
self.tensorflow_models.define_feedforward_model()
#### train the model ####
print('training...')
if self.sequential_training:
### Train feedforward model ###
self.tensorflow_models.train_sequence_model(train_x, train_y, batch_size=self.batch_size, num_of_epochs=self.num_of_epochs, shuffle_data=self.shuffle_data,utt_length=utt_length)
elif self.encoder_decoder:
self.encoder_decoder_models.train_encoder_decoder_model(train_x,train_y,batch_size=self.batch_size,num_of_epochs=self.num_of_epochs,shuffle_data=True,utt_length=utt_length)
else:
self.tensorflow_models.train_feedforward_model(train_x, train_y, batch_size=self.batch_size, num_of_epochs=self.num_of_epochs, shuffle_data=self.shuffle_data)
def test_tensorflow_model(self):
#### load the data ####
print('preparing test_x from input feature files...')
test_x, test_flen = data_utils.read_test_data_from_file_list(self.inp_test_file_list, self.inp_dim)
#### normalize the data ####
data_utils.norm_data(test_x, self.inp_scaler)
#### compute predictions ####
if self.encoder_decoder:
self.encoder_decoder_models.predict(test_x,self.out_scaler,self.gen_test_file_list)
else:
self.tensorflow_models.predict(test_x, self.out_scaler, self.gen_test_file_list, self.sequential_training)
def main_function(self):
### Implement each module ###
if self.NORMDATA:
self.normlize_data()
if self.TRAINMODEL:
self.train_tensorflow_model()
if self.TESTMODEL:
self.test_tensorflow_model()
if __name__=="__main__":
if len(sys.argv) != 2:
print('usage: python run_tensorflow_with_merlin_io.py [config file name]')
sys.exit(1)
# create a configuration instance
# and get a short name for this instance
cfg = configuration.configuration()
config_file = sys.argv[1]
config_file = os.path.abspath(config_file)
cfg.configure(config_file)
print("--- Job started ---")
start_time = time.time()
# main function
tensorflow_instance = TensorflowClass(cfg)
# except:
# print "inp stats file is %s"%cfg.inp_stats_file
# sys.exit(0)
tensorflow_instance.main_function()
(m, s) = divmod(int(time.time() - start_time), 60)
print("--- Job completion time: %d min. %d sec ---" % (m, s))
sys.exit(0)
| apache-2.0 | -2,820,490,871,144,838,700 | 45.647287 | 189 | 0.610266 | false |
bassio/ipype | ipype/__main__.py | 1 | 1473 | import click
from pathlib import Path
from ipype.pipeline import IPypeApp, Pipeline
from traitlets.config import Config
@click.group(invoke_without_command=True)
@click.pass_context
def main(ctx, **args):
if ctx.invoked_subcommand is None:
rerun()
else:
pass
@main.command(context_settings=dict(ignore_unknown_options=True,))
@click.option('--pipeline', '-p', type=click.Path(exists=True))
@click.option('--output_dir', '-o', type=click.Path(exists=False))
@click.argument('cmdline_args', nargs=-1, type=click.UNPROCESSED)
def run(pipeline, output_dir, **cmdline_args):
c = Config()
c.Pipeline.path = pipeline
c.Pipeline.output_dir = output_dir
c.Pipeline.cmdline_args = cmdline_args['cmdline_args']
app = IPypeApp(config=c)
app.initialize()
app.pipeline.start()
#pipeline = Pipeline(config=c)
#pipeline.initialize()
#pipeline.start()
@main.command()
def rerun():
print('rerunning')
from ipype.config import DirPipelineConfigLoader
output_dir = str(Path(".").absolute())
pipeline_config = DirPipelineConfigLoader(output_dir).load_config()
c = Config()
c.Pipeline.path = pipeline_config['pipeline_dir']
c.Pipeline.output_dir = output_dir
c.Pipeline.cmdline_args = pipeline_config['cmdline_args']
app = IPypeApp(config=c)
app.initialize()
app.pipeline.start()
if __name__ == "__main__":
main()
pass
| bsd-3-clause | 1,204,749,345,119,081,200 | 24.396552 | 71 | 0.65852 | false |
harihpr/tweetclickers | test/default.py | 1 | 12268 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from pybossa.core import db
#from pybossa.model import rebuild_db
from pybossa.core import create_app, sentinel
from pybossa.model.app import App
from pybossa.model.category import Category
from pybossa.model.task import Task
from pybossa.model.task_run import TaskRun
from pybossa.model.user import User
import pybossa.model as model
from functools import wraps
from factories import reset_all_pk_sequences
import random
import os
os.environ['PYBOSSA_SETTINGS'] = '../settings_test.py'
os.environ['PYBOSSA_REDIS_CACHE_DISABLED'] = '1'
flask_app = create_app(run_as_server=False)
def with_context(f):
@wraps(f)
def decorated_function(*args, **kwargs):
with flask_app.app_context():
return f(*args, **kwargs)
return decorated_function
def rebuild_db():
"""Rebuild the DB."""
db.drop_all()
db.create_all()
class Test(object):
def setUp(self):
self.flask_app = flask_app
self.app = flask_app.test_client()
with self.flask_app.app_context():
rebuild_db()
reset_all_pk_sequences()
def tearDown(self):
with self.flask_app.app_context():
db.session.remove()
self.redis_flushall()
reset_all_pk_sequences()
fullname = u'T Tester'
fullname2 = u'T Tester 2'
email_addr = u'[email protected]'
email_addr2 = u'[email protected]'
root_addr = u'[email protected]'
name = u'tester'
name2 = u'tester-2'
root_name = u'root'
api_key = 'tester'
api_key_2 = 'tester-2'
root_api_key = 'root'
app_name = u'My New Project'
app_short_name = u'test-app'
password = u'tester'
root_password = password + 'root'
cat_1 = 'thinking'
cat_2 = 'sensing'
def create(self,sched='default'):
root, user,user2 = self.create_users()
info = {
'total': 150,
'long_description': 'hello world',
'task_presenter': 'TaskPresenter',
'sched': sched
}
app = self.create_project(info)
app.owner = user
db.session.add(root)
db.session.commit()
db.session.add(user)
db.session.commit()
db.session.add(user2)
db.session.commit()
db.session.add(app)
task_info = {
'question': 'My random question',
'url': 'my url'
}
task_run_info = {
'answer': u'annakarenina'
}
# Create the task and taskruns for the first app
for i in range (0,10):
task, task_run = self.create_task_and_run(task_info, task_run_info, app, user,i)
db.session.add_all([task, task_run])
db.session.commit()
db.session.remove()
def create_2(self,sched='default'):
root, user,user2 = self.create_users()
info = {
'total': 150,
'long_description': 'hello world',
'task_presenter': 'TaskPresenter',
'sched': sched
}
app = self.create_project(info)
app.owner = user
db.session.add_all([root, user, user2, app])
task_info = {
'question': 'My random question',
'url': 'my url'
}
task_run_info = {
'answer': u'annakarenina'
}
# Create the task and taskruns for the first app
task, task_run = self.create_task_and_run(task_info, task_run_info, app, user,1)
db.session.add_all([task, task_run])
db.session.commit()
db.session.remove()
def create_users(self):
root = User(
email_addr = self.root_addr,
name = self.root_name,
passwd_hash = self.root_password,
fullname = self.fullname,
api_key = self.root_api_key)
root.set_password(self.root_password)
user = User(
email_addr = self.email_addr,
name = self.name,
passwd_hash = self.password,
fullname = self.fullname,
api_key = self.api_key
)
user.set_password(self.password)
user2 = User(
email_addr = self.email_addr2,
name = self.name2,
passwd_hash = self.password + "2",
fullname = self.fullname2,
api_key=self.api_key_2)
user2.set_password(self.password)
return root, user, user2
def create_project(self,info):
with self.flask_app.app_context():
category = db.session.query(Category).first()
if category is None:
self._create_categories()
category = db.session.query(Category).first()
app = App(
name=self.app_name,
short_name=self.app_short_name,
description=u'description',
hidden=0,
category_id=category.id,
info=info
)
return app
def create_task_and_run(self,task_info, task_run_info, app, user, order):
task = Task(app_id = 1, state = '0', info = task_info, n_answers=10)
task.app = app
# Taskruns will be assigned randomly to a signed user or an anonymous one
if random.randint(0,1) == 1:
task_run = TaskRun(
app_id = 1,
task_id = 1,
user_id = 1,
info = task_run_info)
task_run.user = user
else:
task_run = TaskRun(
app_id = 1,
task_id = 1,
user_ip = '127.0.0.%s' % order,
info = task_run_info)
task_run.task = task
return task, task_run
def _create_categories(self):
names = [self.cat_1, self.cat_2]
db.session.add_all([Category(name=c_name,
short_name=c_name.lower().replace(" ",""),
description=c_name)
for c_name in names])
db.session.commit()
def redis_flushall(self):
sentinel.connection.master_for('mymaster').flushall()
class Fixtures:
fullname = u'T Tester'
fullname2 = u'T Tester 2'
email_addr = u'[email protected]'
email_addr2 = u'[email protected]'
root_addr = u'[email protected]'
name = u'tester'
name2 = u'tester-2'
root_name = u'root'
api_key = 'tester'
api_key_2 = 'tester-2'
root_api_key = 'root'
app_name = u'My New Project'
app_short_name = u'test-app'
password = u'tester'
root_password = password + 'root'
cat_1 = 'thinking'
cat_2 = 'sensing'
@classmethod
def create(cls,sched='default'):
root, user,user2 = Fixtures.create_users()
info = {
'total': 150,
'long_description': 'hello world',
'task_presenter': 'TaskPresenter',
'sched': sched
}
app = Fixtures.create_project(info)
app.owner = user
db.session.add(root)
db.session.commit()
db.session.add(user)
db.session.commit()
db.session.add(user2)
db.session.commit()
db.session.add(app)
task_info = {
'question': 'My random question',
'url': 'my url'
}
task_run_info = {
'answer': u'annakarenina'
}
# Create the task and taskruns for the first app
for i in range (0,10):
task, task_run = Fixtures.create_task_and_run(task_info, task_run_info, app, user,i)
db.session.add_all([task, task_run])
db.session.commit()
db.session.remove()
@classmethod
def create_2(cls,sched='default'):
root, user,user2 = Fixtures.create_users()
info = {
'total': 150,
'long_description': 'hello world',
'task_presenter': 'TaskPresenter',
'sched': sched
}
app = Fixtures.create_project(info)
app.owner = user
db.session.add_all([root, user, user2, app])
task_info = {
'question': 'My random question',
'url': 'my url'
}
task_run_info = {
'answer': u'annakarenina'
}
# Create the task and taskruns for the first app
task, task_run = Fixtures.create_task_and_run(task_info, task_run_info, app, user,1)
db.session.add_all([task, task_run])
db.session.commit()
db.session.remove()
@classmethod
def create_users(cls):
root = User(
email_addr = cls.root_addr,
name = cls.root_name,
passwd_hash = cls.root_password,
fullname = cls.fullname,
api_key = cls.root_api_key)
root.set_password(cls.root_password)
user = User(
email_addr = cls.email_addr,
name = cls.name,
passwd_hash = cls.password,
fullname = cls.fullname,
api_key = cls.api_key)
user.set_password(cls.password)
user2 = User(
email_addr = cls.email_addr2,
name = cls.name2,
passwd_hash = cls.password + "2",
fullname = cls.fullname2,
api_key=cls.api_key_2)
user2.set_password(cls.password)
return root, user, user2
@classmethod
def create_project(cls,info):
category = db.session.query(Category).first()
if category is None:
cls.create_categories()
category = db.session.query(Category).first()
app = App(
name=cls.app_name,
short_name=cls.app_short_name,
description=u'description',
hidden=0,
category_id=category.id,
info=info
)
return app
@classmethod
def create_task_and_run(cls,task_info, task_run_info, app, user, order):
task = Task(app_id = 1, state = '0', info = task_info, n_answers=10)
task.app = app
# Taskruns will be assigned randomly to a signed user or an anonymous one
if random.randint(0,1) == 1:
task_run = TaskRun(
app_id = 1,
task_id = 1,
user_id = 1,
info = task_run_info)
task_run.user = user
else:
task_run = TaskRun(
app_id = 1,
task_id = 1,
user_ip = '127.0.0.%s' % order,
info = task_run_info)
task_run.task = task
return task, task_run
@classmethod
def create_categories(cls):
names = [cls.cat_1, cls.cat_2]
db.session.add_all([Category(name=c_name,
short_name=c_name.lower().replace(" ",""),
description=c_name)
for c_name in names])
db.session.commit()
@classmethod
def redis_flushall(cls):
sentinel.connection.master_for('mymaster').flushall()
def assert_not_raises(exception, call, *args, **kwargs):
try:
call(*args, **kwargs)
assert True
except exception as ex:
assert False, str(ex)
| agpl-3.0 | -2,231,355,643,079,829,200 | 29.593516 | 97 | 0.529834 | false |
nyu-dl/WebNav | op_link.py | 1 | 5077 | '''
Custom theano class to access page links.
'''
import numpy as np
import theano
from theano import gof
from theano import tensor
import time
import parameters as prm
import utils
class Link(theano.Op):
__props__ = ()
def __init__(self, wiki, wikipre, vocab):
self.wiki = wiki
self.wikipre = wikipre
self.vocab = vocab
self.mem = {}
def make_node(self, x, x2, x3, x4, x5):
# check that the theano version has support for __props__.
# This next line looks like it has a typo,
# but it's actually a way to detect the theano version
# is sufficiently recent to support the use of __props__.
assert hasattr(self, '_props'), "Your version of theano is too old to support __props__."
x = tensor.as_tensor_variable(x)
x2 = tensor.as_tensor_variable(x2)
x3 = tensor.as_tensor_variable(x3)
x4 = tensor.as_tensor_variable(x4)
x5 = tensor.as_tensor_variable(x5)
if prm.att_doc:
if prm.compute_emb:
td = tensor.itensor4().type()
else:
td = tensor.ftensor4().type()
tm = tensor.ftensor3().type()
else:
if prm.compute_emb:
td = tensor.itensor3().type()
else:
td = tensor.ftensor3().type()
tm = tensor.fmatrix().type()
return theano.Apply(self, [x,x2,x3,x4,x5], [td, tm, \
tensor.fmatrix().type(), tensor.ivector().type()])
def perform(self, node, inputs, output_storage):
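        #build, per hypothesis in the beam, the outgoing-link representations (L),
        #their mask (L_m), the candidate page ids and the index of the true link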
#st = time.time()
pages_id = inputs[0]
p_truth = inputs[1]
it = int(inputs[2])
uidx = int(inputs[3])
k_beam = int(inputs[4])
run = True
if uidx in self.mem:
if it in self.mem[uidx]:
L, L_m, l_page_id, l_truth = self.mem[uidx][it]
run = False
if run:
max_links = k_beam
lst_links = []
for i, page_id in enumerate(pages_id):
if int(page_id) != -1:
links = self.wiki.get_article_links(page_id)
links = list(set(links)) # remove duplicates.
links.sort() # h5py only accepts sorted indexes.
lst_links.append(links)
if len(links) > max_links:
max_links = len(links)
else:
lst_links.append([])
if prm.att_doc:
if prm.compute_emb:
L = np.zeros((len(pages_id), max_links, prm.max_segs_doc, prm.max_words), np.int32)
else:
L = np.zeros((len(pages_id), max_links, prm.max_segs_doc, prm.dim_emb), np.float32)
L_m = np.zeros((len(pages_id), max_links, prm.max_segs_doc), np.float32)
else:
if prm.compute_emb:
L = np.zeros((len(pages_id), max_links, prm.max_words), np.int32)
else:
L = np.zeros((len(pages_id), max_links, prm.dim_emb), np.float32)
L_m = np.zeros((len(pages_id), max_links), np.float32)
l_page_id = -np.ones((len(pages_id), max_links+1), np.float32) # '+1' to consider stop action.
l_truth = np.zeros((len(pages_id)), np.int32)
for i, links in enumerate(lst_links):
if len(links) > 0:
if prm.compute_emb:
# retrieve the precomputed indexes.
links_c = self.wikipre.f['idx'][links]
else:
# retrieve the precomputed embeddings.
links_c = self.wikipre.f['emb'][links]
if prm.att_doc:
L[i,:len(links),:,:] = links_c
links_mask = self.wikipre.f['mask'][links]
for k, link_mask in enumerate(links_mask):
L_m[i,k,:link_mask] = 1.0
else:
L[i,:len(links),:] = links_c
L_m[i,:len(links)] = 1.0
l_page_id[i,1:len(links)+1] = links # +1 because of the stop action.
for k, link_id in enumerate(links):
if link_id == p_truth[i]:
l_truth[i] = k + 1 # +1 because of the stop action.
if uidx in self.mem:
self.mem[uidx][it] = [L, L_m, l_page_id, l_truth]
else:
self.mem = {uidx: {it: [L, L_m, l_page_id, l_truth]}}
output_storage[0][0] = L
output_storage[1][0] = L_m
output_storage[2][0] = l_page_id
output_storage[3][0] = l_truth
#print 'uidx', uidx, 'it', it, 'time Link op:', str(time.time() - st)
def grad(self, inputs, output_grads):
return [tensor.zeros_like(ii, dtype=theano.config.floatX) for ii in inputs]
| bsd-3-clause | -5,717,748,019,315,914,000 | 36.88806 | 106 | 0.478038 | false |
michaeltelford/gatecrasher | UDP.py | 1 | 1267 |
# Network module used for all UDP networking aspects of the Gatecrasher script.
# Developed by Michael Telford.
import socket
# Initializes socket with datagram proto and binds to port arg.
def bind(port):
global s
host = ''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host, port))
def send(addr, port):
global s
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
data = "gatecrasher request"
address = (addr, port)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(data, address)
def receive(timeout):
global s
s.settimeout(timeout)
while 1:
try:
string, address = s.recvfrom(1024)
return True
except socket.timeout:
return False
def receive_echo(port):
global s
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', port))
# Block until receive and then echo loop (continuous).
while 1:
string, address = s.recvfrom(1024)
s.sendto(string, address)
def close():
global s
try:
s.shutdown(socket.SHUT_RDWR)
s.close()
except socket.error:
pass
# End of module.
| mit | -2,328,868,848,086,685,000 | 19.770492 | 79 | 0.629045 | false |
adiyoss/DeepWDM | front_end/lib/data_slicing.py | 1 | 3506 | import cPickle
import os
from lib.htkmfc import HTKFeat_read
import numpy as np
__author__ = 'yossiadi'
# globals
rho = 360
# TODO also look at https://github.com/fchollet/keras/issues/68 for big datasets
def get_data(path):
"""
Reads the data as numpy d-array
:param path: the path to the data
:return: Numpy d-array for x and y
"""
# validation
if not path:
return None
f = file(path, 'rb')
loaded_objects = []
for i in range(2):
loaded_objects.append(cPickle.load(f))
f.close()
# x = split_2_time_steps(loaded_objects[0]) # get the features first
# y = split_2_time_steps(loaded_objects[1]) # get the labels
return loaded_objects[0], loaded_objects[1]
# split to time steps for the recurrent steps (BPTT)
def split_2_time_steps(data):
# get the features first
t = 1
x_raw = []
time_step_x = []
for i in xrange(len(data)):
x_raw.append(data[i])
if t == rho:
time_step_x.append(x_raw)
x_raw = []
t = 0
t += 1
x = np.array(time_step_x)
return x
def get_data_4_predict(x_dir, y_dir, is_y=True):
if is_y:
x_tmp = []
y_tmp = []
f_tmp = []
for item in os.listdir(x_dir):
if item.endswith(".htk"):
# read the mfcc features
reader = HTKFeat_read(x_dir + item)
matrix = reader.getall()
x_tmp.append(matrix)
labels = np.loadtxt(y_dir + item.replace("_16.htk", '.txt'))
y_tmp.append(labels)
f_tmp.append([item, len(labels)])
x = np.array(x_tmp)
y = np.array(y_tmp)
f_names = np.array(f_tmp)
return x, y, f_names
else:
x_tmp = []
f_tmp = []
for item in os.listdir(x_dir):
if item.endswith(".htk"):
# read the mfcc features
reader = HTKFeat_read(x_dir + item)
matrix = reader.getall()
x_tmp.append(matrix)
f_tmp.append([item, len(matrix)])
x = np.array(x_tmp)
f_names = np.array(f_tmp)
return x, f_names
def get_data_set(train_path, test_path):
"""
loads the train and test sets together
:param train_path: the path to the train data
:param test_path: the path to the test data
:return: 4 numpy d-arrays for the train and test features and labels
"""
x_train, y_train = get_data(train_path)
x_test, y_test = get_data(test_path)
s = 0
count = 0
for i in range(len(y_train)):
s += sum(y_train[i][:, 0])
count += len(y_train[i][:, 0])
prec_1 = (s/count)
print "========================"
print "Labels balance:"
print "The percentage of label 0: %.2f" % (1 - prec_1)
print "The percentage of label 1: %.2f" % prec_1
print "========================"
return x_train, y_train, x_test, y_test
# helper function
def convert_label2vec(path, output):
# read the features
fid = open(path)
lines = fid.readlines()
dim = int(lines[0].split()[0])
y = np.zeros([dim, 2])
y[:, 1] = 1
accumulate = 0
for i in xrange(1, len(lines)):
values = lines[i].split()
y[:, 0][int(values[0]) + accumulate:int(values[1]) + accumulate] = 1
y[:, 1][int(values[0]) + accumulate:int(values[1]) + accumulate] = 0
accumulate += int(values[2])
fid.close()
np.savetxt(output, y)
| mit | 7,266,948,927,122,422,000 | 26.825397 | 80 | 0.535368 | false |
fhorinek/pi8bit | py8bit/opt/controller.py | 1 | 45182 | from collections import OrderedDict
from cell import High, Low, Invisible
import wire
import cell
import pygame
import utils
from pygame import Rect
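#pygame mouse button numbers as delivered in event.button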
LEFT = 1
MID = 2
RIGHT = 3
WHEEL_UP = 4
WHEEL_DOWN = 5
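#editor state machine modes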
MODE_IDLE = 0
MODE_MOVE = 1
MODE_ADD = 2
MODE_DEL = 3
MODE_WIRE = 4
MODE_PAN = 5
MODE_SELECT = 6
MODE_EDIT = 7
MODE_ADD_MODULE = 8
MODE_STEP = 9
MODE_RENAME = 10
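#origin of the wire segment currently being routed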
NODE_DIR_NA = 0
NODE_DIR_FROM_NODE = 1
NODE_DIR_FROM_INPUT = 2
NODE_DIR_FROM_OUTPUT = 3
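#shapes for the hover highlight drawn while routing wires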
LIGHT_NONE = 0
LIGHT_POINT = 1
LIGHT_LINE = 2
class Controller():
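    """Model/controller for a circuit: owns the cell, wire and net objects,
    loads/saves them as tab-separated text files and implements the editor
    interaction (selection, moving, wire routing, pan and zoom)."""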
def __init__(self, canvas, parent):
self.canvas = canvas
self.parent = parent
self.objects = OrderedDict()
self.objects["LOW"] = Low(self)
self.objects["HIGH"] = High(self)
self.selected = []
self.select = False
self.select_start = False
self.select_rect = Rect(0, 0, 0, 0)
self.possible_move = False
self.pan = False
self.pan_x = 0
self.pan_y = 0
self.pan_offset_x = 0
self.pan_offset_y = 0
self.new_node = False
self.new_node_direction = NODE_DIR_NA
self.zoom = 1.0
self.zoom_step = 0.1
self.obj_id = 0
self.net_id = 0
self.highlight_mode = LIGHT_NONE
self.highlight_pos = False
self.add_index = 0
self.add_list = ["label", "and", "or", "nand", "nor", "xor", "not", "diode", "led", "hex", "tgl", "push", "clk", "input", "output", "memory"]
self.font = pygame.font.Font(pygame.font.get_default_font(), int(self.canvas.style["d_font"] * self.zoom))
self.label_font = pygame.font.Font(pygame.font.get_default_font(), int(self.canvas.style["d_label_font"] * self.zoom))
self.need_solve_drawable = True
self.drawable = []
self.read_only = True
def highlight(self, mode, pos = False):
self.highlight_mode = mode
self.highlight_pos = pos
self.canvas.request_io_redraw()
def get_obj_id(self):
self.obj_id += 1
return self.obj_id
def get_net_id(self):
self.net_id += 1
return self.net_id
def normalize_positons(self):
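        #shift everything so the bounding box of visible objects starts at (0, 0)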
big_rect = False
for k in self.objects:
o = self.objects[k]
if not isinstance(o, Invisible):
if big_rect:
big_rect = big_rect.union(o.rect)
else:
big_rect = o.rect
        if big_rect is False:
            #no visible objects, nothing to normalize
            return
        offset_x = big_rect[0]
        offset_y = big_rect[1]
for k in self.objects:
o = self.objects[k]
pos_x = o.rect[0] - offset_x
pos_y = o.rect[1] - offset_y
o.set_pos(pos_x, pos_y)
def write_file(self, filename):
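        #one object per line: name, cell type and its parameters, tab-separated;
        #the built-in HIGH/LOW rails are never written out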
if self.read_only:
return
lines = ""
self.normalize_positons()
# print "Writing file", filename
line_n = 0
for k in self.objects:
if k in ["HIGH", "LOW"]:
continue
o = self.objects[k]
name = o.name
fcs = o.fcs
p = o.get_params()
if p == False:
continue
params = " ".join(p)
line = "\t".join([name, fcs, params])
lines += "%s\n" % line
# print " %5d: %s" % (line_n, line)
line_n += 1
f = open(filename, "w")
f.write(lines)
f.close()
# print "done", filename
def read_file(self, filename):
print "Reading file", filename
try:
f = open(filename, "r")
data = f.readlines()
f.close()
self.create_objects(data)
print "done", filename
return True
except IOError as e:
print "not found", e
return False
def create_objects(self, data):
params = OrderedDict()
line_n = 0
for line in data:
line_n += 1
arr = line.split()
print " %5d: %s" % (line_n, " ".join(arr))
if (len(arr) < 2):
continue
name = arr[0]
fcs = arr[1]
#calc obj id
s = name.split("_")
if len(s) == 4 and s[0] == "" and s[1] == "":
try:
obj_id = int(s[3])
self.obj_id = max(obj_id + 1, self.obj_id)
except ValueError:
pass
#calc net id
if fcs == "node":
s = arr[3].split("_")
if len(s) == 4 and s[0] == "" and s[1] == "":
try:
net_id = int(s[3])
self.net_id = max(net_id + 1, self.net_id)
except ValueError:
pass
o = False
if fcs in self.canvas.cells:
o = self.canvas.cells[fcs](self)
if (o is not False):
params[name] = arr
self.objects[name] = o
#let object to parse parameters
for name in params:
arr = params[name]
o = self.objects[name]
o.parse(arr)
def find_cell(self, name):
if name in self.objects:
return self.objects[name]
else:
return False
def find_cell_pin(self, name):
arr = name.split(".")
if (len(arr) == 1):
o_name = arr[0]
o_pin = False
else:
o_name, o_pin = arr
o = self.find_cell(o_name)
if o == False:
print name, "not found!"
return False
if o_pin == False:
if len(o.outputs) > 0:
o_pin = o.outputs[0]
else:
o_pin = False
return o, o_pin
def find_output(self, obj, pin):
for k in self.objects:
o = self.objects[k]
for p in o.inputs:
pair = o.inputs[p]
if pair == False:
continue
if pair[0] == obj and pair[1] == pin:
return o, p
return False
def blit(self, surface, rect):
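        #map a world-space rect to screen space (apply pan, then zoom) and blit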
rect = Rect(rect)
rect.x += self.pan_offset_x
rect.y += self.pan_offset_y
rect.x *= self.zoom
rect.y *= self.zoom
self.canvas.screen.blit(surface, rect)
def draw_circle(self, pos, state):
pos = list(pos)
pos[0] += self.pan_offset_x
pos[1] += self.pan_offset_y
pos = [int(x * self.zoom) for x in pos]
if (state):
color = self.canvas.style["c_high"]
else:
color = self.canvas.style["c_low"]
self.canvas.draw_circle(color, pos, self.zoom)
def draw_line(self, start, end, state):
#copy the data
start = list(start)
end = list(end)
start[0] += self.pan_offset_x
start[1] += self.pan_offset_y
end[0] += self.pan_offset_x
end[1] += self.pan_offset_y
start = [int(x * self.zoom) for x in start]
end = [int(x * self.zoom) for x in end]
if state:
color = self.canvas.style["c_high"]
else:
color = self.canvas.style["c_low"]
self.canvas.draw_line(start, end, color, self.zoom)
def draw_rect(self, surface, color, rect, width = 0):
rect = Rect(rect)
w = int(width * self.zoom)
rect = Rect([int(x * self.zoom) for x in rect])
if width > 0 and w == 0:
w = 1
pygame.draw.rect(surface, color, rect, w)
def draw_text(self, surface, text, rect):
tmp = self.font.render(text, True, self.canvas.style["c_text"])
rect2 = tmp.get_rect()
rect = Rect([int(x * self.zoom) for x in rect])
rect = [rect.x + rect.w / 2 - rect2.w / 2, rect.y + rect.h / 2 - rect2.h / 2]
surface.blit(tmp, rect)
def draw_label(self, text, rect):
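        #render the label text; returns the surface for the caller to blit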
tmp = self.label_font.render(text, True, self.canvas.style["c_label"])
rect2 = tmp.get_rect()
rect = Rect([int(x * self.zoom) for x in rect])
rect = [rect.x + rect.w / 2 - rect2.w / 2, rect.y + rect.h / 2 - rect2.h / 2]
return tmp
def label_font_size(self, text):
label_font = pygame.font.Font(pygame.font.get_default_font(), self.canvas.style["d_label_font"])
tmp = label_font.render(text, True, self.canvas.style["c_text"])
rect2 = tmp.get_rect()
return rect2
def draw_highlight(self):
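        #draw the routing highlight overlay (a wire segment or a single point)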
if self.highlight_mode == LIGHT_LINE:
start = list(self.highlight_pos[0])
end = list(self.highlight_pos[1])
width = self.canvas.style["d_line_height"]
w = int(width * self.zoom)
start[0] += self.pan_offset_x
start[1] += self.pan_offset_y
start = [int(x * self.zoom) for x in start]
end[0] += self.pan_offset_x
end[1] += self.pan_offset_y
end = [int(x * self.zoom) for x in end]
if width > 0 and w == 0:
w = 1
pygame.draw.line(self.canvas.screen, self.canvas.style["c_highlight"], start, end, w)
if self.highlight_mode == LIGHT_POINT:
width = self.canvas.style["d_point"]
w = int(width * self.zoom)
point = list(self.highlight_pos)
point[0] += int(self.pan_offset_x)
point[1] += int(self.pan_offset_y)
point = [int(x * self.zoom) for x in point]
if width > 0 and w == 0:
w = 1
pygame.draw.circle(self.canvas.screen, self.canvas.style["c_highlight"], point, w)
def draw_highlight_box(self, rect):
rect = Rect(rect)
width = self.canvas.style["d_line_height"]
w = int(width * self.zoom)
rect.x += self.pan_offset_x
rect.y += self.pan_offset_y
rect = Rect([int(x * self.zoom) for x in rect])
if width > 0 and w == 0:
w = 1
pygame.draw.rect(self.canvas.screen, self.canvas.style["c_highlight"], rect, w)
def mk_surface(self, rect):
size = [int(rect.w * self.zoom), int(rect.h * self.zoom)]
return pygame.Surface(size, self.canvas.surface_flags)
def update_zoom(self):
self.font = pygame.font.Font(pygame.font.get_default_font(), int(self.canvas.style["d_font"] * self.zoom))
self.label_font = pygame.font.Font(pygame.font.get_default_font(), int(self.canvas.style["d_label_font"] * self.zoom))
self.solve_drawable()
for k in self.objects:
self.objects[k].request_update_body()
if self.canvas.mode == MODE_ADD:
self.new_node.request_update_body()
self.canvas.request_redraw()
def request_redraw(self):
for o in self.drawable:
o.request_redraw()
def solve_drawable(self):
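        #just mark the visible-object cache dirty; draw() rebuilds it lazily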
self.need_solve_drawable = True
def draw(self, mode):
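        #rebuild the list of visible objects when the viewport or content changed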
if self.need_solve_drawable:
self.need_solve_drawable = False
window = Rect(-self.pan_offset_x, -self.pan_offset_y, self.canvas.size[0] / self.zoom, self.canvas.size[1] / self.zoom)
self.drawable = []
for k in self.objects:
self.objects[k].solve_drawable(window, self.drawable)
if mode == MODE_SELECT:
self.canvas.request_redraw()
self.canvas.request_io_redraw()
for o in self.drawable:
o.draw()
o.draw_io()
if mode == MODE_SELECT:
self.select_rect.normalize()
self.draw_highlight_box(self.select_rect)
for o in self.selected:
self.draw_highlight_box(o.rect)
if mode == MODE_WIRE:
self.draw_highlight()
if mode in [MODE_ADD, MODE_ADD_MODULE]:
if self.new_node is not False:
self.new_node.draw()
self.new_node.draw_io()
def tick(self):
for k in self.objects:
self.objects[k].tick()
def reset(self):
for k in self.objects:
self.objects[k].reset()
def request_update(self): pass
def clear_io_cache(self):
for o in self.drawable:
o.clear_io_cache()
def get_object_pos(self, pos, exclude = []):
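        #topmost object under pos; the draw list is reversed so objects
        #drawn later are hit first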
pos = list(pos)
object_list = list(self.drawable)
object_list.reverse()
for o in object_list:
if o in exclude:
continue
if (o.rect.collidepoint(pos)):
return o
return False
    #wire from an input / output pin
def get_line_pos(self, pos, exclude = []):
pos = list(pos)
for o in self.drawable:
if o in exclude:
continue
data = o.check_input_line_collision(pos)
if (data):
if data[2] in exclude:
continue
return data
return False
    #wire from a net segment
def get_net_line_pos(self, pos, exclude=[]):
pos = list(pos)
for o in self.drawable:
if isinstance(o, wire.Node):
if o in exclude:
continue
data = o.check_net_line_collision(pos)
if (data):
if data[1] in exclude:
continue
return data
return False
def get_output_pos(self, pos, exclude=[]):
pos = list(pos)
for o in self.drawable:
if o in exclude:
continue
pin = o.check_output_collision(pos)
if (pin):
return o, pin
return False
def get_input_pos(self, pos, exclude=[]):
pos = list(pos)
for o in self.drawable:
if o in exclude:
continue
pin = o.check_input_collision(pos)
if (pin):
return o, pin
return False
def add_object(self, fcs, pos, params = []):
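        #instantiate a cell of type fcs, give it an auto-generated __fcs_N name
        #and let it parse its position and extra parameters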
o = self.canvas.cells[fcs](self)
name = "__%s_%d" % (fcs, self.get_obj_id())
self.objects[name] = o
o.update()
o.middle_offset()
pos = "%dx%d" % (pos[0], pos[1])
o.parse([name, fcs, pos] + params)
self.request_redraw()
self.solve_drawable()
return o
def add_node(self, pos, net = False):
o = self.canvas.cells["node"](self)
name = "__node_%d" % (self.get_obj_id())
self.objects[name] = o
o.update()
o.middle_offset()
pos = "%dx%d" % (pos[0], pos[1])
if net is False:
net = self.add_net()
o.parse([name, "node", pos, net.name])
self.request_redraw()
self.solve_drawable()
return o
def add_net(self, net_name = False):
if net_name is False:
net_name = "__net_%d" % (self.get_net_id())
o = self.canvas.cells["net"](self)
self.objects[net_name] = o
o.parse([net_name, "net"])
return o
def apply_grid(self, obj):
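        #snap the object to the nearest grid point and refresh its pin positions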
g_hor = self.canvas.style["g_hor"]
g_ver = self.canvas.style["g_ver"]
obj.rect.x = int(round(obj.rect.x / float(g_hor)) * g_hor)
obj.rect.y = int(round(obj.rect.y / float(g_ver)) * g_ver)
obj.clear_offset()
obj.update_io_xy()
def delete(self, name):
if name in self.objects:
self.objects[name].disconnect()
del self.objects[name]
self.canvas.request_redraw()
self.solve_drawable()
def select_obj(self, objs):
for o in objs:
if o not in self.selected and not isinstance(o, Invisible):
self.selected.append(o)
#self.canvas.request_io_redraw()
def deselect_obj(self, objs):
for o in objs:
if o in self.selected:
self.selected.remove(o)
self.canvas.request_redraw()
def tglselect_obj(self, obj):
if obj in self.selected:
self.deselect_obj([obj])
else:
self.select_obj([obj])
def clear_selection(self):
self.selected = []
#self.canvas.request_io_redraw()
def rename_obj(self, obj, new_name):
if new_name in self.objects:
return False
del self.objects[obj.name]
obj.name = new_name
self.objects[new_name] = obj
obj.update()
return True
def event(self, event, mode):
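        #single dispatcher for all pygame events; behaviour depends on the mode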
#GET event info
hover_object = False
keys = pygame.key.get_pressed()
if hasattr(event, "pos"):
mouse_x = (event.pos[0] / self.zoom) - self.pan_offset_x
mouse_y = (event.pos[1] / self.zoom) - self.pan_offset_y
hover_object = self.get_object_pos([mouse_x, mouse_y])
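            #holding left Ctrl snaps the cursor to the grid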
if keys[pygame.K_LCTRL]:
g_hor = self.canvas.style["g_hor"]
g_ver = self.canvas.style["g_ver"]
mouse_x = int(round(mouse_x / float(g_hor)) * g_hor)
mouse_y = int(round(mouse_y / float(g_ver)) * g_ver)
if event.type == pygame.KEYDOWN:
if event.key == ord('a') and self.canvas.mode == MODE_EDIT:
fcs = self.add_list[self.add_index]
pos = "%dx%d" % (0, 0)
name = "_%s_" % fcs
self.new_node = self.canvas.cells[fcs](self)
self.new_node.update()
self.new_node.middle_offset()
self.new_node.parse([name, fcs, pos])
self.canvas.set_mode(MODE_ADD)
if event.key == ord('m') and self.canvas.mode == MODE_EDIT:
self.canvas.set_mode(MODE_ADD_MODULE)
if event.key == ord('e') and self.canvas.mode in [MODE_IDLE, MODE_WIRE, MODE_RENAME]:
self.highlight(LIGHT_NONE)
self.canvas.set_mode(MODE_EDIT)
if event.key == ord('d') and self.canvas.mode == MODE_IDLE:
self.canvas.set_mode(MODE_STEP)
if event.key == ord('w') and self.canvas.mode == MODE_EDIT:
self.canvas.set_mode(MODE_WIRE)
if event.key == ord('r') and self.canvas.mode == MODE_EDIT:
self.canvas.set_mode(MODE_RENAME)
if event.key == ord('s'):
self.read_only = not self.read_only
self.canvas.request_redraw()
if event.key == pygame.K_SPACE and self.canvas.mode == MODE_STEP:
self.tick()
if event.key == pygame.K_ESCAPE:
self.canvas.request_io_redraw()
if self.canvas.mode == MODE_STEP:
self.canvas.set_mode(MODE_IDLE)
if self.canvas.mode == MODE_EDIT:
self.clear_selection()
self.canvas.set_mode(MODE_IDLE)
if self.canvas.mode == MODE_WIRE:
self.canvas.set_mode(MODE_EDIT)
self.highlight(LIGHT_NONE)
if self.canvas.mode == MODE_ADD:
self.canvas.set_mode(MODE_EDIT)
self.new_node = False
if self.canvas.mode == MODE_ADD_MODULE:
self.canvas.set_mode(MODE_EDIT)
self.new_node = False
if self.canvas.mode == MODE_RENAME:
self.canvas.set_mode(MODE_EDIT)
        #PAN works in every mode
        #MID DOWN => START PAN
if event.type == pygame.MOUSEBUTTONDOWN and event.button == MID:
self.pan_x = event.pos[0] / self.zoom
self.pan_y = event.pos[1] / self.zoom
self.pan = True
self.mode_before = mode
self.canvas.set_mode(MODE_PAN)
if self.pan:
            #MID UP => STOP PAN
if event.type == pygame.MOUSEBUTTONUP and event.button == MID:
self.pan_offset_x += event.pos[0] / self.zoom - self.pan_x
self.pan_offset_y += event.pos[1] / self.zoom - self.pan_y
self.solve_drawable()
self.canvas.request_redraw()
self.pan = False
self.canvas.set_mode(self.mode_before)
if event.type == pygame.MOUSEMOTION:
self.pan_offset_x += event.pos[0] / self.zoom - self.pan_x
self.pan_offset_y += event.pos[1] / self.zoom - self.pan_y
self.pan_x = event.pos[0] / self.zoom
self.pan_y = event.pos[1] / self.zoom
self.solve_drawable()
self.canvas.request_redraw()
        #ZOOM works in every mode
if event.type == pygame.MOUSEBUTTONDOWN and event.button == WHEEL_UP:
if self.zoom < 1.5:
self.pan_offset_x -= mouse_x + self.pan_offset_x - event.pos[0] / self.zoom
self.pan_offset_y -= mouse_y + self.pan_offset_y - event.pos[1] / self.zoom
pan_x = event.pos[0] / self.zoom
pan_y = event.pos[1] / self.zoom
self.zoom += self.zoom_step
self.pan_offset_x += event.pos[0] / self.zoom - pan_x
self.pan_offset_y += event.pos[1] / self.zoom - pan_y
self.update_zoom()
if event.type == pygame.MOUSEBUTTONDOWN and event.button == WHEEL_DOWN:
if self.zoom > 0.2:
pan_x = event.pos[0] / self.zoom
pan_y = event.pos[1] / self.zoom
self.zoom -= self.zoom_step
self.pan_offset_x += event.pos[0] / self.zoom - pan_x
self.pan_offset_y += event.pos[1] / self.zoom - pan_y
self.update_zoom()
if mode == MODE_IDLE or mode == MODE_STEP:
if event.type == pygame.MOUSEBUTTONDOWN and event.button == LEFT:
if hover_object is not False:
hover_object.click()
if mode == MODE_RENAME:
#LEFT DOWN => RENAME
if event.type == pygame.MOUSEBUTTONDOWN and event.button == LEFT:
if hover_object is not False:
if isinstance(hover_object, cell.Label):
label = utils.gui_textedit("Change the label", hover_object.label)
if len(label) == 0:
utils.gui_alert("Error", "Labels can't be empty")
else:
hover_object.label = label
hover_object.update()
self.canvas.set_mode(MODE_EDIT)
else:
if isinstance(hover_object, wire.Node):
obj = hover_object.net
else:
obj = hover_object
old_name = obj.name
name = utils.gui_textedit("Rename the object", obj.name)
if old_name == name:
return
if len(name) == 0:
utils.gui_alert("Error", "Name can't be empty")
return
if not self.rename_obj(obj, name):
utils.gui_alert("Error", "Unable to rename object")
else:
self.canvas.set_mode(MODE_EDIT)
if mode == MODE_EDIT:
#LEFT DOWN => START SELECT
if event.type == pygame.MOUSEBUTTONDOWN and event.button == LEFT:
if hover_object is False:
                    #SHIFT prevents clearing the selection
if not keys[pygame.K_LSHIFT]:
self.clear_selection()
self.canvas.set_mode(MODE_SELECT)
self.select_start = [mouse_x, mouse_y]
self.select_rect = pygame.Rect(mouse_x, mouse_y, 0, 0)
else:
if keys[pygame.K_LSHIFT]:
self.tglselect_obj(hover_object)
else:
if hover_object not in self.selected:
self.clear_selection()
self.select_obj([hover_object])
if hover_object in self.selected:
self.possible_move = True
if event.type == pygame.MOUSEBUTTONUP and event.button == LEFT:
if self.possible_move is True:
self.possible_move = False
if event.type == pygame.MOUSEMOTION:
if self.possible_move is True:
self.possible_move = False
for o in self.selected:
o.set_offset(mouse_x - o.rect[0], mouse_y - o.rect[1])
self.canvas.set_mode(MODE_MOVE)
if event.type == pygame.KEYDOWN and event.key == pygame.K_DELETE:
for o in self.selected:
self.delete(o.name)
self.clear_selection()
if mode == MODE_SELECT:
if event.type == pygame.MOUSEMOTION:
w = mouse_x - self.select_start[0]
h = mouse_y - self.select_start[1]
self.select_rect = pygame.Rect(self.select_start[0], self.select_start[1], w, h)
if event.type == pygame.MOUSEBUTTONUP and event.button == LEFT:
self.canvas.request_io_redraw()
for k in self.objects:
o = self.objects[k]
if (self.select_rect.colliderect(o.rect)):
self.select_obj([o])
self.canvas.set_mode(MODE_EDIT);
if mode == MODE_MOVE:
if event.type == pygame.MOUSEBUTTONUP and event.button == LEFT:
self.canvas.request_redraw()
for o in self.selected:
o.set_pos(mouse_x, mouse_y)
self.apply_grid(o)
if (len(self.selected) == 1):
self.clear_selection()
self.canvas.set_mode(MODE_EDIT);
if event.type == pygame.MOUSEMOTION:
self.canvas.request_redraw()
for o in self.selected:
o.set_pos(mouse_x, mouse_y)
if mode == MODE_WIRE:
if event.type == pygame.MOUSEBUTTONDOWN and event.button == LEFT:
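                #start a new wire: test, in order, an existing node, an input pin,
                #an output pin, a connection line, a net segment, then empty space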
print
print "<<"
print "get_object_pos", hover_object
if isinstance(hover_object, wire.Node):
self.new_node = self.add_node([mouse_x, mouse_y], hover_object.net)
self.new_node.add_sibling(hover_object)
self.new_node_direction = NODE_DIR_FROM_NODE
self.solve_drawable()
return
target = self.get_input_pos([mouse_x, mouse_y])
print "get_input_pos", target
if target is not False:
obj, pin = target
self.new_node = self.add_node([mouse_x, mouse_y])
obj.assign_input(pin, self.new_node, "Y")
self.new_node_direction = NODE_DIR_FROM_INPUT
self.solve_drawable()
return
target = self.get_output_pos([mouse_x, mouse_y])
print "get_output_pos", target
if target is not False:
obj, pin = target
self.new_node = self.add_node([mouse_x, mouse_y])
self.new_node.assign_free_input(obj, pin)
self.new_node_direction = NODE_DIR_FROM_OUTPUT
self.solve_drawable()
return
target = self.get_line_pos([mouse_x, mouse_y])
print "get_line_pos", target
if target is not False:
obj, obj_pin, inp, inp_pin = target
start_node = self.add_node([mouse_x, mouse_y])
self.apply_grid(start_node)
if isinstance(inp, wire.Node):
inp.add_sibling(start_node)
start_node.net.remove_node(self.new_node)
self.delete(start_node.net.name)
inp.net.add_node(start_node)
obj.assign_input(obj_pin, start_node, "Y")
if isinstance(obj, wire.Node):
obj.add_sibling(start_node)
start_node.net.remove_node(start_node)
self.delete(start_node.net.name)
obj.net.add_node(start_node)
start_node.assign_free_input(inp, inp_pin)
self.new_node = self.add_node([mouse_x, mouse_y], start_node.net)
self.new_node.add_sibling(start_node)
self.new_node_direction = NODE_DIR_FROM_NODE
self.solve_drawable()
return
target = self.get_net_line_pos([mouse_x, mouse_y])
print "get_net_line_pos", target
if target is not False:
node1, node2, net = target
start_node = self.add_node([mouse_x, mouse_y], net)
self.apply_grid(start_node)
node1.remove_sibling(node2)
node1.add_sibling(start_node)
node2.remove_sibling(node1)
node2.add_sibling(start_node)
self.new_node = self.add_node([mouse_x, mouse_y], start_node.net)
self.new_node.add_sibling(start_node)
self.new_node_direction = NODE_DIR_FROM_NODE
self.solve_drawable()
return
else:
if hover_object is False:
start_node = self.add_node([mouse_x, mouse_y])
self.apply_grid(start_node)
self.new_node = self.add_node([mouse_x, mouse_y], start_node.net)
self.new_node.add_sibling(start_node)
self.new_node_direction = NODE_DIR_FROM_NODE
self.solve_drawable()
if event.type == pygame.MOUSEBUTTONUP and event.button == LEFT:
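                #finish the wire on whatever is under the cursor, merging nets
                #and deleting the temporary node where appropriate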
if self.new_node is not False:
self.new_node.set_pos(mouse_x, mouse_y)
self.apply_grid(self.new_node)
print
print ">>"
target = self.get_object_pos([mouse_x, mouse_y], [self.new_node])
print "get_object_pos", target
if target is not False:
if isinstance(target, wire.Node):
                            #FROM_INPUT / FROM_OUTPUT are handled below
if self.new_node_direction == NODE_DIR_FROM_NODE:
prev = self.new_node.siblings[0]
target.add_sibling(prev)
prev.net.asimilate(target.net)
self.delete(self.new_node.name)
self.new_node = False
self.solve_drawable()
return
target = self.get_input_pos([mouse_x, mouse_y], [self.new_node])
print "get_input_pos", target
if target is not False and self.new_node_direction is not NODE_DIR_FROM_INPUT:
obj, pin = target
if self.new_node_direction == NODE_DIR_FROM_NODE:
obj.assign_input(pin, self.new_node.siblings[0], "Y")
if self.new_node_direction == NODE_DIR_FROM_OUTPUT:
key = self.new_node.inputs.keys()[0]
inp, inp_pin = self.new_node.inputs[key]
obj.assign_input(pin, inp, inp_pin)
self.delete(self.new_node.name)
self.new_node = False
self.solve_drawable()
return
target = self.get_output_pos([mouse_x, mouse_y], [self.new_node])
print "get_output_pos", target
if target is not False and self.new_node_direction is not NODE_DIR_FROM_OUTPUT:
obj, pin = target
if self.new_node_direction == NODE_DIR_FROM_NODE:
self.new_node.siblings[0].assign_free_input(obj , pin)
if self.new_node_direction == NODE_DIR_FROM_INPUT:
orig_obj, orig_pin = self.find_output(self.new_node, "Y")
orig_obj.assign_input(orig_pin, obj, pin)
self.delete(self.new_node.name)
self.new_node = False
self.solve_drawable()
return
target = self.get_line_pos([mouse_x, mouse_y], [self.new_node])
print "get_line_pos", target
if target is not False:
obj, obj_pin, inp, inp_pin = target
if isinstance(inp, wire.Node):
inp.add_sibling(self.new_node)
self.new_node.net.asimilate(inp.net)
else:
self.new_node.assign_free_input(inp , inp_pin)
if isinstance(obj, wire.Node):
obj.add_sibling(self.new_node)
obj.clear_input(obj_pin)
self.new_node.net.asimilate(obj.net)
else:
obj.assign_input(obj_pin, self.new_node, "Y")
self.new_node = False
self.solve_drawable()
return
target = self.get_net_line_pos([mouse_x, mouse_y], [self.new_node])
print "get_net_line_pos", target
if target is not False:
node1, node2, net = target
node1.remove_sibling(node2)
node1.add_sibling(self.new_node)
node2.remove_sibling(node1)
node2.add_sibling(self.new_node)
self.new_node.net.asimilate(net)
self.new_node = False
self.solve_drawable()
return
self.new_node = False
self.canvas.request_redraw()
if event.type == pygame.MOUSEBUTTONDOWN and event.button == RIGHT:
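            # Right click aborts a pending wire; with no wire pending it
            # deletes whatever is under the cursor instead.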
if self.new_node is not False:
self.delete(self.new_node.name)
self.new_node = False
else:
#delete node or split siblings or net
if isinstance(hover_object, wire.Node):
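                    # Re-route every connection of the node being deleted
                    # through one of its siblings so the net stays intact.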
siblings = hover_object.net.list_node_sibling(hover_object)
if len(siblings) > 0:
successor = siblings[0]
for node in siblings:
successor.add_sibling(node)
for k in hover_object.inputs:
print "hover_object.input", k, hover_object, hover_object.inputs
obj, pin = hover_object.inputs[k]
successor.assign_free_input(obj, pin)
target = self.find_output(hover_object, "Y")
while target is not False:
obj, pin = target
obj.assign_input(pin, successor, "Y")
target = self.find_output(hover_object, "Y")
self.delete(hover_object.name)
self.highlight(LIGHT_NONE)
self.solve_drawable()
return
target = self.get_line_pos([mouse_x, mouse_y])
print "get_line_pos", target
if target is not False:
obj, obj_pin, inp, inp_pin = target
obj.clear_input(obj_pin)
self.highlight(LIGHT_NONE)
self.solve_drawable()
self.canvas.request_redraw()
return
target = self.get_net_line_pos([mouse_x, mouse_y], [self.new_node])
print "get_net_line_pos", target
if target is not False:
node1, node2, net = target
node1.remove_sibling(node2)
node2.remove_sibling(node1)
net.rebuild()
self.canvas.request_redraw()
self.highlight(LIGHT_NONE)
self.solve_drawable()
return
if event.type == pygame.MOUSEMOTION:
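            # While dragging, move the pending node with the cursor and
            # highlight the snap target (pin, line or net segment) beneath it.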
if self.new_node is not False:
self.new_node.set_pos(mouse_x, mouse_y)
self.canvas.request_redraw()
target = self.get_object_pos([mouse_x, mouse_y], [self.new_node])
# print "get_object_pos", target
if target is not False:
if isinstance(target, wire.Node):
                    self.highlight(LIGHT_POINT, target.output_xy["Y"])
return
target = self.get_input_pos([mouse_x, mouse_y], [self.new_node])
# print "get_input_pos", target
if target is not False:
obj, pin = target
pos = obj.input_xy[pin]
                self.highlight(LIGHT_POINT, pos)
return
target = self.get_output_pos([mouse_x, mouse_y], [self.new_node])
# print "get_output_pos", target
if target is not False:
obj, pin = target
pos = obj.output_xy[pin]
                self.highlight(LIGHT_POINT, pos)
return
target = self.get_line_pos([mouse_x, mouse_y], [self.new_node])
# print "get_line_pos", target
if target is not False:
obj, obj_pin, inp, inp_pin = target
if isinstance(obj, wire.Node):
start = obj.output_xy["Y"]
else:
start = obj.input_xy[obj_pin]
if isinstance(inp, wire.Node):
end = inp.output_xy["Y"]
else:
end = inp.output_xy[inp_pin]
self.highlight(LIGHT_LINE, [start, end])
return
target = self.get_net_line_pos([mouse_x, mouse_y], [self.new_node])
# print "get_net_line_pos", target
if target is not False:
node1, node2, net = target
start = node1.output_xy["Y"]
end = node2.output_xy["Y"]
self.highlight(LIGHT_LINE, [start, end])
return
self.highlight(LIGHT_NONE)
if mode == MODE_ADD:
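            # In add mode the right button cycles through the placeable cell
            # types (showing a preview) and the left button places the current
            # one.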
if event.type == pygame.MOUSEBUTTONDOWN and event.button == RIGHT:
self.add_index = (self.add_index + 1) % len(self.add_list)
fcs = self.add_list[self.add_index]
pos = "%dx%d" % (mouse_x, mouse_y)
name = "_%s_" % fcs
self.new_node = self.canvas.cells[fcs](self)
self.new_node.update()
self.new_node.middle_offset()
self.new_node.parse([name, fcs, pos])
self.new_node.drawable = True
self.canvas.request_redraw()
if event.type == pygame.MOUSEMOTION:
if self.new_node is not False:
self.new_node.set_pos(mouse_x, mouse_y)
self.new_node.clear_io_cache()
self.canvas.request_redraw()
if event.type == pygame.MOUSEBUTTONDOWN and event.button == LEFT:
o = self.add_object(self.add_list[self.add_index], [mouse_x, mouse_y])
self.apply_grid(o)
if mode == MODE_ADD_MODULE:
if event.type == pygame.MOUSEBUTTONDOWN and event.button == RIGHT:
fcs = "module"
pos = "%dx%d" % (mouse_x, mouse_y)
name = "_%s_" % fcs
self.new_node = self.canvas.cells[fcs](self)
self.new_node.update()
self.new_node.middle_offset()
self.new_node.parse([name, fcs, pos])
self.new_node_filename = self.new_node.filename
self.new_node.drawable = True
self.canvas.request_redraw()
if event.type == pygame.MOUSEMOTION:
if self.new_node is not False:
self.new_node.set_pos(mouse_x, mouse_y)
self.new_node.clear_io_cache()
self.canvas.request_redraw()
if event.type == pygame.MOUSEBUTTONDOWN and event.button == LEFT:
o = self.add_object("module", [mouse_x, mouse_y], [self.new_node_filename])
self.apply_grid(o)
| gpl-2.0 | -1,118,782,217,478,487,700 | 37.420918 | 149 | 0.441348 | false |
getpelican/pelican | pelican/__init__.py | 1 | 20822 | import argparse
import logging
import multiprocessing
import os
import pprint
import sys
import time
import traceback
from collections.abc import Iterable
# Combines all paths to `pelican` package accessible from `sys.path`
# Makes it possible to install `pelican` and namespace plugins into different
# locations in the file system (e.g. pip with `-e` or `--user`)
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
from rich.console import Console
# pelican.log has to be the first pelican module to be loaded
# because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import init as init_logging
from pelican.generators import (ArticlesGenerator, # noqa: I100
PagesGenerator, SourceFileGenerator,
StaticGenerator, TemplatePagesGenerator)
from pelican.plugins import signals
from pelican.plugins._utils import get_plugin_name, load_plugins
from pelican.readers import Readers
from pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer
from pelican.settings import coerce_overrides, read_settings
from pelican.utils import (FileSystemWatcher, clean_output_dir, maybe_pluralize)
from pelican.writers import Writer
try:
__version__ = __import__('pkg_resources') \
.get_distribution('pelican').version
except Exception:
__version__ = "unknown"
DEFAULT_CONFIG_NAME = 'pelicanconf.py'
logger = logging.getLogger(__name__)
console = Console()
class Pelican:
def __init__(self, settings):
"""Pelican initialisation
Performs some checks on the environment before doing anything else.
"""
# define the default settings
self.settings = settings
self.path = settings['PATH']
self.theme = settings['THEME']
self.output_path = settings['OUTPUT_PATH']
self.ignore_files = settings['IGNORE_FILES']
self.delete_outputdir = settings['DELETE_OUTPUT_DIRECTORY']
self.output_retention = settings['OUTPUT_RETENTION']
self.init_path()
self.init_plugins()
signals.initialized.send(self)
def init_path(self):
if not any(p in sys.path for p in ['', os.curdir]):
logger.debug("Adding current directory to system path")
sys.path.insert(0, '')
def init_plugins(self):
self.plugins = []
for plugin in load_plugins(self.settings):
name = get_plugin_name(plugin)
logger.debug('Registering plugin `%s`', name)
try:
plugin.register()
self.plugins.append(plugin)
except Exception as e:
logger.error('Cannot register plugin `%s`\n%s',
name, e)
self.settings['PLUGINS'] = [get_plugin_name(p) for p in self.plugins]
def run(self):
"""Run the generators and return"""
start_time = time.time()
context = self.settings.copy()
# Share these among all the generators and content objects
# They map source paths to Content objects or None
context['generated_content'] = {}
context['static_links'] = set()
context['static_content'] = {}
context['localsiteurl'] = self.settings['SITEURL']
generators = [
cls(
context=context,
settings=self.settings,
path=self.path,
theme=self.theme,
output_path=self.output_path,
) for cls in self._get_generator_classes()
]
# Delete the output directory if (1) the appropriate setting is True
# and (2) that directory is not the parent of the source directory
if (self.delete_outputdir
and os.path.commonpath([os.path.realpath(self.output_path)]) !=
os.path.commonpath([os.path.realpath(self.output_path),
os.path.realpath(self.path)])):
clean_output_dir(self.output_path, self.output_retention)
for p in generators:
if hasattr(p, 'generate_context'):
p.generate_context()
for p in generators:
if hasattr(p, 'refresh_metadata_intersite_links'):
p.refresh_metadata_intersite_links()
signals.all_generators_finalized.send(generators)
writer = self._get_writer()
for p in generators:
if hasattr(p, 'generate_output'):
p.generate_output(writer)
signals.finalized.send(self)
articles_generator = next(g for g in generators
if isinstance(g, ArticlesGenerator))
pages_generator = next(g for g in generators
if isinstance(g, PagesGenerator))
pluralized_articles = maybe_pluralize(
(len(articles_generator.articles) +
len(articles_generator.translations)),
'article',
'articles')
pluralized_drafts = maybe_pluralize(
(len(articles_generator.drafts) +
len(articles_generator.drafts_translations)),
'draft',
'drafts')
pluralized_hidden_articles = maybe_pluralize(
(len(articles_generator.hidden_articles) +
len(articles_generator.hidden_translations)),
'hidden article',
'hidden articles')
pluralized_pages = maybe_pluralize(
(len(pages_generator.pages) +
len(pages_generator.translations)),
'page',
'pages')
pluralized_hidden_pages = maybe_pluralize(
(len(pages_generator.hidden_pages) +
len(pages_generator.hidden_translations)),
'hidden page',
'hidden pages')
pluralized_draft_pages = maybe_pluralize(
(len(pages_generator.draft_pages) +
len(pages_generator.draft_translations)),
'draft page',
'draft pages')
print('Done: Processed {}, {}, {}, {}, {} and {} in {:.2f} seconds.'
.format(
pluralized_articles,
pluralized_drafts,
pluralized_hidden_articles,
pluralized_pages,
pluralized_hidden_pages,
pluralized_draft_pages,
time.time() - start_time))
def _get_generator_classes(self):
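        # Internal generators come first; plugins may contribute more through
        # the get_generators signal.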
discovered_generators = [
(ArticlesGenerator, "internal"),
(PagesGenerator, "internal")
]
if self.settings["TEMPLATE_PAGES"]:
discovered_generators.append((TemplatePagesGenerator, "internal"))
if self.settings["OUTPUT_SOURCES"]:
discovered_generators.append((SourceFileGenerator, "internal"))
for receiver, values in signals.get_generators.send(self):
if not isinstance(values, Iterable):
values = (values,)
for generator in values:
if generator is None:
continue # plugin did not return a generator
discovered_generators.append((generator, receiver.__module__))
# StaticGenerator must run last, so it can identify files that
# were skipped by the other generators, and so static files can
# have their output paths overridden by the {attach} link syntax.
discovered_generators.append((StaticGenerator, "internal"))
generators = []
for generator, origin in discovered_generators:
if not isinstance(generator, type):
logger.error("Generator %s (%s) cannot be loaded", generator, origin)
continue
logger.debug("Found generator: %s (%s)", generator.__name__, origin)
generators.append(generator)
return generators
def _get_writer(self):
writers = [w for _, w in signals.get_writer.send(self) if isinstance(w, type)]
num_writers = len(writers)
if num_writers == 0:
return Writer(self.output_path, settings=self.settings)
if num_writers > 1:
logger.warning("%s writers found, using only first one", num_writers)
writer = writers[0]
logger.debug("Found writer: %s", writer)
return writer(self.output_path, settings=self.settings)
class PrintSettings(argparse.Action):
def __call__(self, parser, namespace, values, option_string):
instance, settings = get_instance(namespace)
if values:
# One or more arguments provided, so only print those settings
for setting in values:
if setting in settings:
# Only add newline between setting name and value if dict
if isinstance(settings[setting], dict):
setting_format = '\n{}:\n{}'
else:
setting_format = '\n{}: {}'
print(setting_format.format(
setting,
pprint.pformat(settings[setting])))
else:
print('\n{} is not a recognized setting.'.format(setting))
break
else:
# No argument was given to --print-settings, so print all settings
pprint.pprint(settings)
parser.exit()
class ParseDict(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
d = {}
if values:
for item in values:
split_items = item.split("=", 1)
key = split_items[0].strip()
value = split_items[1].strip()
d[key] = value
setattr(namespace, self.dest, d)
def parse_arguments(argv=None):
parser = argparse.ArgumentParser(
description='A tool to generate a static blog, '
                    'with reStructuredText input files.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(dest='path', nargs='?',
help='Path where to find the content files.',
default=None)
parser.add_argument('-t', '--theme-path', dest='theme',
help='Path where to find the theme templates. If not '
'specified, it will use the default one included with '
'pelican.')
parser.add_argument('-o', '--output', dest='output',
help='Where to output the generated files. If not '
'specified, a directory will be created, named '
'"output" in the current path.')
parser.add_argument('-s', '--settings', dest='settings',
help='The settings of the application, this is '
'automatically set to {} if a file exists with this '
'name.'.format(DEFAULT_CONFIG_NAME))
parser.add_argument('-d', '--delete-output-directory',
dest='delete_outputdir', action='store_true',
default=None, help='Delete the output directory.')
parser.add_argument('-v', '--verbose', action='store_const',
const=logging.INFO, dest='verbosity',
help='Show all messages.')
parser.add_argument('-q', '--quiet', action='store_const',
const=logging.CRITICAL, dest='verbosity',
help='Show only critical errors.')
parser.add_argument('-D', '--debug', action='store_const',
const=logging.DEBUG, dest='verbosity',
help='Show all messages, including debug messages.')
parser.add_argument('--version', action='version', version=__version__,
help='Print the pelican version and exit.')
parser.add_argument('-r', '--autoreload', dest='autoreload',
action='store_true',
help='Relaunch pelican each time a modification occurs'
' on the content files.')
parser.add_argument('--print-settings', dest='print_settings', nargs='*',
action=PrintSettings, metavar='SETTING_NAME',
help='Print current configuration settings and exit. '
'Append one or more setting name arguments to see the '
'values for specific settings only.')
parser.add_argument('--relative-urls', dest='relative_paths',
action='store_true',
help='Use relative urls in output, '
'useful for site development')
parser.add_argument('--cache-path', dest='cache_path',
help=('Directory in which to store cache files. '
'If not specified, defaults to "cache".'))
parser.add_argument('--ignore-cache', action='store_true',
dest='ignore_cache', help='Ignore content cache '
'from previous runs by not loading cache files.')
parser.add_argument('-w', '--write-selected', type=str,
dest='selected_paths', default=None,
help='Comma separated list of selected paths to write')
parser.add_argument('--fatal', metavar='errors|warnings',
choices=('errors', 'warnings'), default='',
help=('Exit the program with non-zero status if any '
'errors/warnings encountered.'))
parser.add_argument('--logs-dedup-min-level', default='WARNING',
choices=('DEBUG', 'INFO', 'WARNING', 'ERROR'),
help=('Only enable log de-duplication for levels equal'
' to or above the specified value'))
parser.add_argument('-l', '--listen', dest='listen', action='store_true',
help='Serve content files via HTTP and port 8000.')
parser.add_argument('-p', '--port', dest='port', type=int,
help='Port to serve HTTP files at. (default: 8000)')
parser.add_argument('-b', '--bind', dest='bind',
help='IP to bind to when serving files via HTTP '
'(default: 127.0.0.1)')
parser.add_argument('-e', '--extra-settings', dest='overrides',
help='Specify one or more SETTING=VALUE pairs to '
'override settings. If VALUE contains spaces, '
'add quotes: SETTING="VALUE". Values other than '
'integers and strings can be specified via JSON '
'notation. (e.g., SETTING=none)',
nargs='*',
action=ParseDict
)
args = parser.parse_args(argv)
if args.port is not None and not args.listen:
logger.warning('--port without --listen has no effect')
if args.bind is not None and not args.listen:
logger.warning('--bind without --listen has no effect')
return args
def get_config(args):
config = {}
if args.path:
config['PATH'] = os.path.abspath(os.path.expanduser(args.path))
if args.output:
config['OUTPUT_PATH'] = \
os.path.abspath(os.path.expanduser(args.output))
if args.theme:
abstheme = os.path.abspath(os.path.expanduser(args.theme))
config['THEME'] = abstheme if os.path.exists(abstheme) else args.theme
if args.delete_outputdir is not None:
config['DELETE_OUTPUT_DIRECTORY'] = args.delete_outputdir
if args.ignore_cache:
config['LOAD_CONTENT_CACHE'] = False
if args.cache_path:
config['CACHE_PATH'] = args.cache_path
if args.selected_paths:
config['WRITE_SELECTED'] = args.selected_paths.split(',')
if args.relative_paths:
config['RELATIVE_URLS'] = args.relative_paths
if args.port is not None:
config['PORT'] = args.port
if args.bind is not None:
config['BIND'] = args.bind
config['DEBUG'] = args.verbosity == logging.DEBUG
config.update(coerce_overrides(args.overrides))
return config
def get_instance(args):
config_file = args.settings
if config_file is None and os.path.isfile(DEFAULT_CONFIG_NAME):
config_file = DEFAULT_CONFIG_NAME
args.settings = DEFAULT_CONFIG_NAME
settings = read_settings(config_file, override=get_config(args))
cls = settings['PELICAN_CLASS']
if isinstance(cls, str):
module, cls_name = cls.rsplit('.', 1)
module = __import__(module)
cls = getattr(module, cls_name)
return cls(settings), settings
def autoreload(args, excqueue=None):
print(' --- AutoReload Mode: Monitoring `content`, `theme` and'
' `settings` for changes. ---')
pelican, settings = get_instance(args)
watcher = FileSystemWatcher(args.settings, Readers, settings)
sleep = False
while True:
try:
# Don't sleep first time, but sleep afterwards to reduce cpu load
if sleep:
time.sleep(0.5)
else:
sleep = True
modified = watcher.check()
if modified['settings']:
pelican, settings = get_instance(args)
watcher.update_watchers(settings)
if any(modified.values()):
print('\n-> Modified: {}. re-generating...'.format(
', '.join(k for k, v in modified.items() if v)))
pelican.run()
except KeyboardInterrupt:
if excqueue is not None:
excqueue.put(None)
return
raise
except Exception as e:
if (args.verbosity == logging.DEBUG):
if excqueue is not None:
excqueue.put(
traceback.format_exception_only(type(e), e)[-1])
else:
raise
logger.warning(
'Caught exception:\n"%s".', e,
exc_info=settings.get('DEBUG', False))
def listen(server, port, output, excqueue=None):
RootedHTTPServer.allow_reuse_address = True
try:
httpd = RootedHTTPServer(
output, (server, port), ComplexHTTPRequestHandler)
except OSError as e:
logging.error("Could not listen on port %s, server %s.", port, server)
if excqueue is not None:
excqueue.put(traceback.format_exception_only(type(e), e)[-1])
return
try:
print("\nServing site at: http://{}:{} - Tap CTRL-C to stop".format(
server, port))
httpd.serve_forever()
except Exception as e:
if excqueue is not None:
excqueue.put(traceback.format_exception_only(type(e), e)[-1])
return
except KeyboardInterrupt:
httpd.socket.close()
if excqueue is not None:
return
raise
def main(argv=None):
args = parse_arguments(argv)
logs_dedup_min_level = getattr(logging, args.logs_dedup_min_level)
init_logging(args.verbosity, args.fatal,
logs_dedup_min_level=logs_dedup_min_level)
logger.debug('Pelican version: %s', __version__)
logger.debug('Python version: %s', sys.version.split()[0])
try:
pelican, settings = get_instance(args)
if args.autoreload and args.listen:
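            # Run the reloader and the HTTP server in separate processes; the
            # first to fail (or a Ctrl-C) pushes onto the queue and both are
            # then terminated.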
excqueue = multiprocessing.Queue()
p1 = multiprocessing.Process(
target=autoreload,
args=(args, excqueue))
p2 = multiprocessing.Process(
target=listen,
args=(settings.get('BIND'), settings.get('PORT'),
settings.get("OUTPUT_PATH"), excqueue))
p1.start()
p2.start()
exc = excqueue.get()
p1.terminate()
p2.terminate()
if exc is not None:
logger.critical(exc)
elif args.autoreload:
autoreload(args)
elif args.listen:
listen(settings.get('BIND'), settings.get('PORT'),
settings.get("OUTPUT_PATH"))
else:
watcher = FileSystemWatcher(args.settings, Readers, settings)
watcher.check()
with console.status("Generating..."):
pelican.run()
except KeyboardInterrupt:
logger.warning('Keyboard interrupt received. Exiting.')
except Exception as e:
logger.critical('%s', e)
if args.verbosity == logging.DEBUG:
raise
else:
sys.exit(getattr(e, 'exitcode', 1))
| agpl-3.0 | -4,679,461,749,283,359,000 | 37.135531 | 86 | 0.564451 | false |
magenta/magenta | magenta/models/shared/sequence_generator_test.py | 1 | 3155 | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sequence_generator."""
from magenta.models.shared import model
from magenta.models.shared import sequence_generator
from note_seq.protobuf import generator_pb2
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class Model(model.BaseModel):
"""Test model."""
def _build_graph_for_generation(self):
pass
class SequenceGenerator(sequence_generator.BaseSequenceGenerator):
"""Test generator."""
def __init__(self, checkpoint=None, bundle=None):
details = generator_pb2.GeneratorDetails(
id='test_generator',
description='Test Generator')
    super(SequenceGenerator, self).__init__(
Model(), details, checkpoint=checkpoint,
bundle=bundle)
def _generate(self):
pass
class SequenceGeneratorTest(tf.test.TestCase):
def testSpecifyEitherCheckPointOrBundle(self):
bundle = generator_pb2.GeneratorBundle(
generator_details=generator_pb2.GeneratorDetails(
id='test_generator'),
checkpoint_file=[b'foo.ckpt'],
metagraph_file=b'foo.ckpt.meta')
with self.assertRaises(sequence_generator.SequenceGeneratorError):
      SequenceGenerator(checkpoint='foo.ckpt', bundle=bundle)
    with self.assertRaises(sequence_generator.SequenceGeneratorError):
      SequenceGenerator(checkpoint=None, bundle=None)
    SequenceGenerator(checkpoint='foo.ckpt')
    SequenceGenerator(bundle=bundle)
def testUseMatchingGeneratorId(self):
bundle = generator_pb2.GeneratorBundle(
generator_details=generator_pb2.GeneratorDetails(
id='test_generator'),
checkpoint_file=[b'foo.ckpt'],
metagraph_file=b'foo.ckpt.meta')
    SequenceGenerator(bundle=bundle)
    bundle.generator_details.id = 'blarg'
    with self.assertRaises(sequence_generator.SequenceGeneratorError):
      SequenceGenerator(bundle=bundle)
def testGetBundleDetails(self):
# Test with non-bundle generator.
    seq_gen = SequenceGenerator(checkpoint='foo.ckpt')
self.assertIsNone(seq_gen.bundle_details)
# Test with bundle-based generator.
bundle_details = generator_pb2.GeneratorBundle.BundleDetails(
description='bundle of joy')
bundle = generator_pb2.GeneratorBundle(
generator_details=generator_pb2.GeneratorDetails(
id='test_generator'),
bundle_details=bundle_details,
checkpoint_file=[b'foo.ckpt'],
metagraph_file=b'foo.ckpt.meta')
    seq_gen = SequenceGenerator(bundle=bundle)
self.assertEqual(bundle_details, seq_gen.bundle_details)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 7,298,235,038,731,701,000 | 31.193878 | 74 | 0.720127 | false |
lliendo/Radar | radar/logger/__init__.py | 1 | 2413 | # -*- coding: utf-8 -*-
"""
This file is part of Radar.
Radar is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Radar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with Radar. If not, see <http://www.gnu.org/licenses/>.
Copyright 2015 Lucas Liendo.
"""
from logging import getLogger, Formatter, shutdown, INFO
from logging.handlers import RotatingFileHandler
from os.path import dirname
from os import mkdir
from errno import EEXIST
from sys import stderr
class LoggerError(Exception):
pass
class RadarLogger(object):
_shared_state = {'logger': None}
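    # Borg pattern: every instance shares the same state, so the logger is
    # configured once and then reused by all callers of the static methods.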
def __init__(self, path, logger_name='radar', max_size=100, rotations=5):
self.__dict__ = self._shared_state
self._create_dir(path)
self._shared_state['logger'] = self._configure_logger(path, logger_name, max_size * (1024 ** 2), rotations)
def _create_dir(self, path):
try:
mkdir(dirname(path))
except OSError as e:
if e.errno != EEXIST:
raise LoggerError('Error - Couldn\'t create directory : \'{:}\'. Details : {:}.'.format(path, e.strerror))
def _configure_logger(self, path, logger_name, max_size, rotations):
try:
logger = getLogger(logger_name)
logger.setLevel(INFO)
file_handler = RotatingFileHandler(path, maxBytes=max_size, backupCount=rotations)
file_handler.setFormatter(Formatter(fmt='%(asctime)s - %(message)s', datefmt='%b %d %H:%M:%S'))
logger.addHandler(file_handler)
except Exception as e:
raise LoggerError('Error - Couldn\'t configure Radar logger. Details : {:}.'.format(e))
return logger
@staticmethod
def log(message):
try:
RadarLogger._shared_state['logger'].info(message)
except Exception as e:
stderr.write('Error - Couldn\'t log to Radar logger. Details : {:}.'.format(e))
@staticmethod
def shutdown():
shutdown()
| lgpl-3.0 | -8,177,158,718,975,967,000 | 32.513889 | 122 | 0.665147 | false |
gopythongo/gopythongo | src/py/gopythongo/vaultgetcert.py | 1 | 33878 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import functools
import os
import subprocess
import sys
import hvac
import configargparse
from typing import Dict, Sequence, Iterable, Union, Any, cast, TextIO, Callable
from OpenSSL import crypto
from gopythongo.main import DebugConfigAction
from requests.exceptions import RequestException
out_target = sys.stdout
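# os.umask() only reports the previous mask when setting a new one, so set a
# throwaway value and immediately restore the original to read it.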
umask_cur = os.umask(0o022)
os.umask(umask_cur)
def _out(*args: Any, **kwargs: Any) -> None:
if "file" not in kwargs:
kwargs["file"] = sys.stderr
print(*args, **kwargs)
def _result_output(envvar: str, value: str) -> None:
print("%s=%s" % (envvar, value,), file=out_target)
def _result_envdir(envdir: str, envvar: str, value: str) -> None:
fn = os.path.join(envdir, envvar)
_out("writing %s=%s" % (envvar, fn))
with open(fn, mode="wt", encoding="utf-8") as envf:
envf.write(value)
_result = _result_output # type: Callable[..., None]
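# Strip the bits covered by the current umask from a requested file/dir mode,
# mirroring what the OS itself would do on creation.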
def _get_masked_mode(mode: Union[int, str]) -> int:
if isinstance(mode, str):
m = int(mode, base=8)
else:
m = mode
return (0o777 ^ umask_cur) & m
class HelpAction(configargparse.Action):
def __init__(self,
option_strings: Sequence[str],
dest: str,
default: Any=None,
choices: Iterable[Any]=None,
help: str="Show help for GoPythonGo version parsers.") -> None:
super().__init__(option_strings=option_strings, dest=dest, default=default,
nargs="?", choices=choices, help=help)
def __call__(self, parser: configargparse.ArgumentParser, namespace: configargparse.Namespace,
values: Union[str, Sequence[Any], None], option_string: str=None) -> None:
print("Secret Management\n"
"=================\n"
"\n"
"This is a little helper tool that contacts a Vault server to issue a SSL client\n"
"certificate and save its X.509 certificate and private key to local files. If\n"
"you use this on your build server to create client certificates for each\n"
"Continuous Integration (CD) build, you can create client credentials for\n"
"accessing Vault instances or databases or other services on your environments\n"
"right on your buildserver. In my opinion this is the best place for the\n"
"credentials to live, since they can be dynamic and don't need to live in either\n"
"your configuration management software (Puppet/Chef/Ansible/Salt) or in your\n"
"application. Both places which are often shared far and wide throughout your\n"
"organization.\n"
"\n"
"Instead giving each build its own certificate and each deployment environment\n"
"(development/stage/production) it's own (intermediate) CA does not detract from\n"
"security (you have to trust your CD infrastructure implicitly, if it's\n"
"compromised, the attacker can deploy malicious code), but makes it much easier\n"
"to, for example, revoke access credentials in bulk using CRLs.\n"
"\n"
"Finally, you can use the created certificates to access a separate Vault\n"
"instance inside your deployment environments and create local service\n"
"credentials there (like short-lived database access credentials). Thereby\n"
"using Vault's audit backends to create a secure offsite audit trail of activity.\n"
"\n"
"vaultgetcert can also output environment variable key/value pairs and create\n"
"multiple certificate chains for cross-signed trust paths, allowing you to\n"
"centralize secret management as described in the GoPythonGo process "
"documentation.\n"
"\n"
"Here is a cheatsheet for setting up a PKI endpoint in Vault:\n"
"\n"
"# Mount one PKI backend per environment and/or application that gets its own\n"
"# builds on this server and allow builds to remain valid for 1 year (tune to\n"
"# your specifications). Application CAs are better suited to Vault as it binds\n"
"# roles to CAs. Environment CAs are better suited to some servers like Postgres\n"
"# as they bind roles to CNs. Using vaultgetcert you can also easily use\n"
"# cross-signed intermediate CAs and use both approaches.\n"
"vault mount -path=pki-YourApplication -default-lease-ttl=8760h \\\n"
" -max-lease-ttl=8760h pki\n"
"\n"
"# generate an intermediate CA with a 2048 bit key (default)\n"
"vault write pki-YourApplication/intermediate/generate/internal \\\n"
" common_name=\"(YourApplication) Build CA X1\"\n"
"\n"
"# Sign the intermediate CA using your private CA\n"
"# then write the certificate back to the Vault store\n"
"vault write pki-YourApplication/intermediate/set-signed [email protected]\n"
"\n"
"# Now this CA certificate should be installed on the relevant servers, e.g. in\n"
"# Postgres ssl_ca_cert. You can also use the root certificate with a trustchain\n"
"# in the client certificate.\n"
"vault write pki-YourApplication/roles/build ttl=8760h allow_localhost=false \\\n"
" allow_ip_sans=false server_flag=false client_flag=true \\\n"
" allow_any_name=true key_type=rsa\n"
"\n"
"# Request a build certificate for a build.\n"
"# This is basically what vaultgetcert does! So instead of running this command\n"
"# use vaultgetcert :)\n"
"# We \"hack\" the git hash into a domain name SAN because Vault currently\n"
"# doesn't support freetext SANs.\n"
"vault write pki-YourApplication/issue/build common_name=\"yourapp\" \\\n"
" alt_names=\"024572834273498734.git\" exclude_cn_from_sans=true\n"
"\n"
"# Set everything up to authenticate to Vault using these certs. For example:\n"
"vault auth-enable cert\n"
"vault mount -path=db-YourApplication postgresql\n"
"vault write db-YourApplication/config/lease lease=96h lease_max=96h\n"
"vault write db-YourApplication/config/connection connection_url=-\n"
"postgresql://vaultadmin:(PASSWORD)@postgresql.local:5432/YourAppDatabase\n"
"\n"
"vault write db-YourApplication/roles/fullaccess sql=-\n"
" CREATE ROLE \"{{name}}\" WITH LOGIN ENCRYPTED PASSWORD '{{password}}' VALID\n"
" UNTIL '{{expiration}}' IN ROLE \"YourAppDBOwner\" INHERIT NOCREATEROLE\n"
" NOCREATEDB NOSUPERUSER NOREPLICATION NOBYPASSRLS;\n"
"\n"
"vault policy-write yourapp_rights -\n"
"path \"db-YourApplication/creds/fullaccess\" {\n"
" capabilities = [\"read\"]\n"
"}\n"
"\n"
"vault write auth/cert/certs/YourApplication \\\n"
" display_name=yourapp \\\n"
" policies=yourapp_rights \\\n"
" [email protected] \\\n"
" ttl=3600\n")
parser.exit(0)
def get_parser() -> configargparse.ArgumentParser:
parser = configargparse.ArgumentParser(
description="This is a little helper tool that contacts a Vault server to issue a SSL client "
"certificate and save its X.509 certificate and private key to local files. Use "
"--help-verbose to learn more. vaultgetcert expects everything to be PEM encoded. "
"It cannot convert between different formats.",
prog="gopythongo.vaultgetcert",
args_for_setting_config_path=["-c"],
config_arg_help_message="Use this path instead of the default (.gopythongo/vaultwrapper)",
default_config_files=[".gopythongo/vaultgetcert",]
)
parser.add_argument("-o", "--output", dest="output", default=None, env_var="VGC_OUTPUT",
help="Direct output to this file or folder (when in envdir mode). (default: stdout)")
parser.add_argument("--envdir", dest="envdir_mode", default=False, action="store_true", env_var="VGC_ENVDIR",
help="When this is set, vaultgetcert will write each environment variable setting into its "
"own file, creating a DJB daemontools compatible envdir.")
parser.add_argument("--address", dest="vault_address", default="https://vault.local:8200",
env_var="VGC_VAULT_URL",
help="Vault API base URL (default: https://vault.local:8200/). ")
parser.add_argument("--vault-pki", dest="vault_pki", default=None, required=True,
env_var="VGC_VAULT_PKI",
help="The PKI backend path to issue a certificate from Vault (e.g. 'pki/issue/[role]').")
parser.add_argument("--subject-alt-names", dest="subject_alt_names", env_var="VGC_SUBJECT_ALTNAME",
default=None,
help="alt_names parameter to pass to Vault for the issued certificate. (Use a comma-separated "
"list if you want to specify more than one.)")
parser.add_argument("--common-name", dest="common_name", env_var="VGC_COMMON_NAME", default=None, required=True,
help="The CN to pass to Vault for the issued certificate.")
parser.add_argument("--include-cn-in-sans", dest="include_cn_in_sans", env_var="VGC_INCLUDE_CN_IN_SANS",
default=False, action="store_true",
help="Set this if you want the value of --common-name to also show up in the issued "
"certificate's SANs.")
parser.add_argument("--certfile-out", dest="certfile", env_var="VGC_CERTFILE_OUT", required=True,
help="Path of the file where the generated certificate will be stored. ")
parser.add_argument("--keyfile-out", dest="keyfile", env_var="VGC_KEYFILE_OUT", required=True,
help="Path of the file where the generated private key will be stored. Permissions for this "
"file will be set to 600.")
parser.add_argument("--certchain-out", dest="certchain", env_var="VGC_CERTCHAIN_OUT", default=None,
help="Save the issuer CA certificate, which is likely the intermediate CA that you need to "
"provide in the certificate chain.")
parser.add_argument("--overwrite", dest="overwrite", env_var="VGC_OVERWRITE", default=False, action="store_true",
help="When set, this program will overwrite existing certificates and keys on disk. ")
parser.add_argument("--help-verbose", action=HelpAction,
help="Show additional information about how to set up Vault for using vaultgetcert.")
parser.add_argument("--debug-config", action=DebugConfigAction)
gp_xsign = parser.add_argument_group("Handling cross-signing CAs")
gp_xsign.add_argument("--xsign-cacert", dest="xsigners", default=[], action="append", env_var="VGC_XSIGN_CACERT",
help="Can be set multiple times. The argument must be in the form 'bundlename=certificate'. "
"For each certificate specified, vaultgetcert will verify that it uses the same public "
"key as the issuer certificate returned by Vault. It will then create a bundle "
"(concatenated PEM file) for each xsign-cacert with the specified name. MUST be used "
"together with --xsign-bundle-path. You can specify an absolute path for bundlename in "
"which case --xsign-bundle-path will not be used for that bundlename. This option has "
"hacky support for multiple values in its environment variable. You can specify "
"multiple comma-separated values.")
gp_xsign.add_argument("--issuer-bundle", dest="issuer_bundle", default=None,
help="The argument for this is the bundlename for the issuer certificate returned by Vault. "
"That bundlename will be handled like --xsign-cacert bundlenames. It can also be used "
"in --output-bundle-envvar, thereby allowing you to use whichever CA Vault returns like "
"any other well-known CA.")
gp_xsign.add_argument("--xsign-bundle-path", dest="bundlepath", default=None, env_var="VGC_XSIGN_BUNDLE_PATH",
help="A folder where all of the generated files without absolute paths from specified "
"--xsign-cacert parameters will be stored. Existing bundles will be overwritten.")
gp_xsign.add_argument("--output-bundle-envvar", dest="bundle_envvars", default=[], action="append",
env_var="VGC_OUTPUT_BUNDLE_ENVVAR",
help="Can be specified multiple times. The argument must be in the form "
"'envvar=bundlename[:altpath]' (altpath is optional). "
"For each envvar specified vaultgetcert will output 'envvar=bundlepath' to stdout. If "
"you specify 'altpath', 'altpath' will replace the FULL path in bundlepath. The "
"filename will stay the same. This output is meant to be used as configuration "
"environment variables for your program and can be shipped, for example, for usage in "
"/etc/default.")
gp_xsign.add_argument("--output-key-envvar", dest="key_envvars", default=[], action="append",
env_var="VGC_OUTPUT_KEY_ENVVAR",
help="Can be specified multiple times. Output one or more key/value pairs to stdout in the "
"form 'envvar=keyfile' where 'keyfile' is the file specified by --keyfile-out. Each "
"argument should be formatted like 'envvar[:altpath]' where 'altpath' is optional. If "
"'altpath' is specified, the keyfile's path will be replaced by 'altpath' in the "
"output.")
gp_filemode = parser.add_argument_group("File mode options")
gp_filemode.add_argument("--mode-mkdir-output", dest="mode_output_dir", default="0o755",
env_var="VGC_MODE_MKDIR_OUTPUT",
help="If the output folder for the environment variable configuration (--output) doesn't "
"exist yet, create it with these permissions (will be umasked). (default: 0o755)")
gp_filemode.add_argument("--mode-mkdir-certs", dest="mode_certs_dir", default="0o755",
env_var="VGC_MODE_MKDIR_CERTS",
help="If the output folders for certificates and bundles (--certfile-out, "
"--certchain-out, --xsign-bundle-path) doesn't exist yet, create them with these "
"permissions (will be umasked). (default: 0o755)")
gp_filemode.add_argument("--mode-mkdir-key", dest="mode_key_dir", default="0o700",
env_var="VGC_MODE_MKDIR_KEY",
help="If the output folder for the private key (--keyfile-out) doesn't exist yet, "
"create it with these permissions (will be umasked). (default: 0o700)")
gp_filemode.add_argument("--mode-file-output", dest="mode_output_file", default="0o644",
env_var="VGC_MODE_FILE_OUTPUT",
help="Create the output file (--output) with these permissions (will be umasked). "
"(default: 0o644)")
gp_filemode.add_argument("--mode-certbundles", dest="mode_certbundle_files", default="0o644",
env_var="VGC_MODE_CERTBUNDLES",
help="Create the certbundle files (--xsign-cacert) with these permissions (will be "
"umasked). (default: 0o644)")
gp_filemode.add_argument("--mode-keyfile", dest="mode_key_file", default="0o600",
env_var="VGC_MODE_KEYFILE",
help="Create the private key file (--keyfile-out) with these permissions (will be "
"umasked). (default: 0o600)")
gp_https = parser.add_argument_group("HTTPS options")
gp_https.add_argument("--pin-cacert", dest="pin_cacert", default="/etc/ssl/certs/ca-certificates.crt",
env_var="VGC_VAULT_CACERT",
help="Set the CA certificate for Vault (i.e. the server certificate MUST be signed by a CA "
"in this file). The file should contain a list of CA certificates. The default is the "
"location of the Debian Linux CA bundle (Default: '/etc/ssl/certs/ca-certificates.crt')")
gp_https.add_argument("--tls-skip-verify", dest="verify", env_var="VGC_SSL_SKIP_VERIFY", default=True,
action="store_false",
help="Skip SSL verification (only use this during debugging or development!)")
gp_auth = parser.add_argument_group("Vault authentication options")
gp_auth.add_argument("--token", dest="vault_token", env_var="VAULT_TOKEN", default=None,
help="A Vault access token with a valid lease. This is one way of authenticating the wrapper "
"to Vault. This is mutually exclusive with --app-id/--user-id. ")
gp_auth.add_argument("--app-id", dest="vault_appid", env_var="VAULT_APPID", default=None,
help="Set the app-id for Vault app-id authentication.")
gp_auth.add_argument("--user-id", dest="vault_userid", env_var="VAULT_USERID", default=None,
help="Set the user-id for Vault app-id authentication.")
gp_auth.add_argument("--client-cert", dest="client_cert", default=None, env_var="VAULT_CLIENTCERT",
help="Use a HTTPS client certificate to connect.")
gp_auth.add_argument("--client-key", dest="client_key", default=None, env_var="VAULT_CLIENTKEY",
help="Set the HTTPS client certificate private key.")
gp_git = parser.add_argument_group("Git integration")
gp_git.add_argument("--use-git", dest="git_binary", default="/usr/bin/git", env_var="VGC_GIT",
help="Specify an alternate git binary to call for git integration. (default: /usr/bin/git)")
gp_git.add_argument("--git-include-commit-san", dest="git_include_commit_san", default=".", action="store_true",
env_var="VGC_INCLUDE_COMMIT_SAN",
help="If 'git rev-parse HEAD' returns a commit hash, add a certificate SAN called "
"'[commithash].git'.")
return parser
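# Module-level registries filled in by validate_args(): xsign_bundles maps
# bundle names to cross-signing CA cert paths (None = use the issuer CA that
# Vault returns), bundle_vars maps bundle names to their envvar/altpath specs.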
xsign_bundles = {} # type: Dict[str, str]
bundle_vars = {} # type: Dict[str, Dict[str, str]]
def validate_args(args: configargparse.Namespace) -> None:
if args.vault_token:
pass
elif args.vault_appid and args.vault_userid:
pass
elif args.client_cert and args.client_key:
pass
else:
_out("* ERR VAULT CERT UTIL *: You must specify an authentication method, so you must pass either "
"--token or --app-id and --user-id or --client-cert and --client-key or set the VAULT_TOKEN, "
"VAULT_APPID and VAULT_USERID environment variables respectively. If you run GoPythonGo under "
"sudo (e.g. for pbuilder), make sure your build server environment variables also exist in the "
"root shell, or build containers, or whatever else you're using.")
if args.vault_appid:
_out("* INF VAULT CERT UTIL *: appid is set")
if args.vault_userid:
_out("* INF VAULT CERT UTIL *: userid is set")
if args.client_cert:
_out("* INF VAULT CERT UTIL *: client_cert is set")
if args.client_key:
_out("* INF VAULT CERT UTIL *: client_key is set")
sys.exit(1)
if args.client_cert and (not os.path.exists(args.client_cert) or not os.access(args.client_cert, os.R_OK)):
_out("* ERR VAULT CERT UTIL *: %s File not found or no read privileges" % args.client_cert)
sys.exit(1)
if args.client_key and (not os.path.exists(args.client_key) or not os.access(args.client_key, os.R_OK)):
_out("* ERR VAULT CERT UTIL *: %s File not found or no read privileges" % args.client_key)
sys.exit(1)
if os.path.exists(args.certfile) and not args.overwrite:
_out("* ERR VAULT CERT UTIL *: %s already exists and --overwrite is not specified" % args.certfile)
sys.exit(1)
if os.path.exists(os.path.dirname(args.certfile)) and not os.access(os.path.dirname(args.certfile), os.W_OK):
_out("* ERR VAULT CERT UTIL *: %s already exists and is not writable (--certfile-out)" %
os.path.dirname(args.certfile))
sys.exit(1)
if os.path.exists(args.keyfile) and not args.overwrite:
_out("* ERR VAULT CERT UTIL *: %s already exists and --overwrite is not specified" % args.keyfile)
sys.exit(1)
if os.path.exists(os.path.dirname(args.keyfile)) and not os.access(os.path.dirname(args.keyfile), os.W_OK):
_out("* ERR VAULT CERT UTIL *: %s already exists and is not writable (--keyfile-out)" %
os.path.dirname(args.keyfile))
sys.exit(1)
if args.git_include_commit_san and (not os.path.exists(args.git_binary) or not os.access(args.git_binary, os.X_OK)):
_out("* ERR VAULT CERT UTIL *: --git-include-commit-san is set, but Git binary %s does not exist or is not "
"executable" % args.git_binary)
sys.exit(1)
for xcertspec in args.xsigners:
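        # The environment variable may carry several comma-separated specs:
        # split off the first one and append the remainder back onto the list,
        # which the for loop picks up on a later iteration.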
if "," in xcertspec:
xcertspec, y = xcertspec.split(",", 1)[0].strip(), xcertspec.split(",", 1)[1].strip()
args.xsigners += [y]
        if "=" not in xcertspec:
            _out("* ERR VAULT CERT UTIL *: each --xsign-cacert argument must be formed as 'bundlename=certificate'. "
                 "%s is not." % xcertspec)
            sys.exit(1)
        bundlename, xcert = xcertspec.split("=", 1)
        if bundlename not in xsign_bundles.keys():
            xsign_bundles[bundlename] = xcert
        else:
            _out("* ERR VAULT CERT UTIL *: duplicate xsigner bundle name %s (from 1:%s and 2:%s=%s)" %
                 (bundlename, xcertspec, bundlename, xsign_bundles[bundlename]))
            sys.exit(1)
if not os.path.exists(xcert) or not os.access(xcert, os.R_OK):
_out("* ERR VAULT CERT UTIL *: %s does not exist or is not readable (from %s)" % (xcert, xcertspec))
sys.exit(1)
if args.issuer_bundle:
xsign_bundles[args.issuer_bundle] = None
if args.bundlepath:
if os.path.exists(args.bundlepath) and not os.access(args.bundlepath, os.W_OK):
_out("* ERR VAULT CERT UTIL *: %s is not writable" % args.bundlepath)
for benvspec in args.bundle_envvars:
if "=" not in benvspec:
_out("* ERR VAULT CERT UTIL *: each --output-bundle-envvar must be formed as 'envvar=bundlename[:altpath]' "
"with altpath being optional. %s is not." % benvspec)
sys.exit(1)
envvar, bundlespec = benvspec.split("=", 1)
if ":" in bundlespec:
bundleref, altpath = bundlespec.split(":", 1)
else:
bundleref, altpath = bundlespec, None
if bundleref not in xsign_bundles.keys():
_out("* ERR VAULT CERT UTIL *: --output-bundle-envvar argument %s references a bundle name %s which has "
"not been specified as an argument to --xsign-cacert." % (benvspec, bundleref))
sys.exit(1)
_out("* INF VAULT CERT UTIL *: registered environment %s" % envvar)
bundle_vars[bundleref] = {
"envvar": envvar,
"altpath": altpath,
}
for perms in [args.mode_output_dir, args.mode_certs_dir, args.mode_key_dir, args.mode_output_file,
args.mode_certbundle_files, args.mode_key_file]:
try:
int(perms, base=8)
except ValueError:
_out("* ERR VAULT CERT UTIL *: %s is not a vaild permission string (must be octal unix file/folder "
"permissions" % perms)
sys.exit(1)
    if args.envdir_mode and args.output and os.path.exists(args.output) and not os.path.isdir(args.output):
        _out("* ERR VAULT CERT UTIL *: %s already exists and is not a directory. --envdir requires the output "
             "path to be a directory or not exist." % args.output)
        sys.exit(1)
def main() -> None:
global out_target, _result
_out("* INF VAULT CERT UTIL *: cwd is %s" % os.getcwd())
parser = get_parser()
args = parser.parse_args()
validate_args(args)
vcl = hvac.Client(url=args.vault_address,
token=args.vault_token if args.vault_token else None,
verify=args.pin_cacert if args.pin_cacert else args.verify,
cert=(
args.client_cert,
args.client_key
) if args.client_cert else None)
if not vcl.is_authenticated():
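        # No valid token session yet: fall back to TLS client-cert or app-id
        # authentication.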
try:
if args.client_cert:
vcl.auth_tls()
if args.vault_appid:
vcl.auth_app_id(args.vault_appid, args.vault_userid)
except RequestException as e:
_out("* ERR VAULT CERT UTIL *: Failure while authenticating to Vault. (%s)" % str(e))
sys.exit(1)
if not vcl.is_authenticated():
_out("* ERR VAULT CERT UTIL *: vaultgetcert was unable to authenticate with Vault, but no error occured "
":(.")
sys.exit(1)
alt_names = args.subject_alt_names or ""
if args.git_include_commit_san:
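        # Vault has no free-text SAN support, so the commit hash is encoded as
        # a fake "<hash>.git" DNS-style SAN (see the help text above).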
try:
output = subprocess.check_output([args.git_binary, "rev-parse", "HEAD"],
stderr=subprocess.STDOUT, universal_newlines=True)
except subprocess.CalledProcessError as e:
_out("* ERR VAULT CERT UTIL *: Error %s. trying to get the Git commit hash (git rev-parse HEAD) failed "
"with\n%s" % (e.returncode, e.output))
sys.exit(e.returncode)
output = output.strip()
if len(output) != 40:
_out("* ERR VAULT CERT UTIL *: Git returned a commit-hash of length %s (%s) instead of 40." %
(len(output), output))
sys.exit(1)
if alt_names == "":
alt_names = "%s.git" % output
else:
alt_names = "%s.git,%s" % (output, alt_names)
try:
res = vcl.write(args.vault_pki, common_name=args.common_name, alt_names=alt_names,
exclude_cn_from_sans=not args.include_cn_in_sans)
except RequestException as e:
        _out("* ERR VAULT CERT UTIL *: Unable to write Vault path %s. (%s)" % (args.vault_pki, str(e)))
sys.exit(1)
    if "data" not in res or "certificate" not in res["data"] or "private_key" not in res["data"]:
        _out("* ERR VAULT CERT UTIL *: Vault returned a value without the necessary fields "
             "(data->certificate,private_key). Returned dict was:\n%s" %
             str(res))
        sys.exit(1)
if os.path.dirname(args.certfile) != "" and not os.path.exists(os.path.dirname(args.certfile)):
_out("* INF VAULT CERT UTIL *: Creating folder %s" % os.path.dirname(args.certfile))
os.makedirs(os.path.dirname(args.certfile), mode=_get_masked_mode(args.mode_certs_dir), exist_ok=True)
if os.path.dirname(args.keyfile) != "" and not os.path.exists(os.path.dirname(args.keyfile)):
_out("* INF VAULT CERT UTIL *: Creating folder %s" % os.path.dirname(args.keyfile))
os.makedirs(os.path.dirname(args.keyfile), mode=_get_masked_mode(args.mode_key_dir), exist_ok=True)
for bundlename in xsign_bundles.keys():
if os.path.dirname(bundlename) != "" and not os.path.exists(os.path.dirname(bundlename)):
_out("* INF VAULT CERT UTIL *: Creating folder %s" % os.path.dirname(bundlename))
os.makedirs(os.path.dirname(bundlename), mode=_get_masked_mode(args.mode_certs_dir),
exist_ok=True)
with open(args.certfile, "wt", encoding="ascii") as certfile, \
open(args.keyfile, "wt", encoding="ascii") as keyfile:
os.chmod(args.certfile, _get_masked_mode(args.mode_certbundle_files))
os.chmod(args.keyfile, _get_masked_mode(args.mode_key_file))
certfile.write(res["data"]["certificate"].strip())
certfile.write("\n")
keyfile.write(res["data"]["private_key"].strip())
keyfile.write("\n")
if args.certchain:
with open(args.certchain, "wt", encoding="ascii") as certchain:
certchain.write(res["data"]["issuing_ca"].strip())
certchain.write("\n")
_out("* INF VAULT CERT UTIL *: the issued certificate and key have been stored in %s and %s" %
(args.certfile, args.keyfile))
if args.certchain:
_out("* INF VAULT CERT UTIL *: the certificate chain has been stored in %s" % args.certchain)
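    # Cross-signing certificates must certify the same key pair as the issuing
    # CA returned by Vault; extract its public key (and subject, for error
    # messages) for the comparison below.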
vault_pubkey = crypto.load_certificate(
crypto.FILETYPE_PEM,
res["data"]["issuing_ca"]
).get_pubkey().to_cryptography_key().public_numbers()
vault_subject = crypto.load_certificate(
crypto.FILETYPE_PEM,
res["data"]["issuing_ca"]
).get_subject().get_components()
if args.bundlepath and not os.path.exists(args.bundlepath):
os.makedirs(args.bundlepath, mode=_get_masked_mode(args.mode_certs_dir), exist_ok=True)
for bundlename in xsign_bundles.keys():
if xsign_bundles[bundlename] is None:
x509str = res["data"]["issuing_ca"]
else:
with open(xsign_bundles[bundlename], mode="rt", encoding="ascii") as xcacert:
x509str = xcacert.read()
# the cross-signing certificate must sign the same keypair as the issueing_ca returned by Vault.
# Let's check...
xsign_pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, x509str).get_pubkey() \
.to_cryptography_key().public_numbers()
if vault_pubkey != xsign_pubkey:
xsign_subject = crypto.load_certificate(crypto.FILETYPE_PEM, x509str).get_subject().get_components()
_out("* ERR VAULT CERT UTIL *: Cross-signing certificate %s has a different public key as the CA returned "
"by Vault. This certificate is invalid for the bundle.\n"
"***Xsign subject***\n%s\n***Vault subject***\n%s" %
(bundlename,
", ".join(["%s=%s" % (k.decode("utf-8"), v.decode("utf-8")) for k, v in xsign_subject]),
", ".join(["%s=%s" % (k.decode("utf-8"), v.decode("utf-8")) for k, v in vault_subject])))
sys.exit(1)
fn = bundlename
if args.bundlepath and not os.path.isabs(bundlename):
fn = os.path.join(args.bundlepath, os.path.basename(bundlename))
with open(fn, "wt", encoding="ascii") as bundle:
_out("* INF VAULT CERT UTIL *: Creating bundle %s" % fn)
bundle.write(res["data"]["certificate"].strip())
bundle.write("\n")
bundle.write(x509str.strip())
bundle.write("\n")
if args.output and args.envdir_mode:
if not os.path.exists(args.output):
            os.makedirs(args.output, mode=_get_masked_mode(args.mode_output_dir), exist_ok=True)
_result = cast(Callable[..., None], functools.partial(_result_envdir, args.output))
_out("writing envdir to %s" % args.output)
elif args.output:
if not os.path.exists(os.path.dirname(args.output)):
            os.makedirs(os.path.dirname(args.output), mode=_get_masked_mode(args.mode_output_dir), exist_ok=True)
out_target = cast(TextIO, open(args.output, mode="wt", encoding="utf-8"))
_out("writing output to %s" % args.output)
for bundleref in bundle_vars.keys():
# _result goes to stdout or --output
fn = bundleref
if args.bundlepath and not os.path.isabs(bundleref):
fn = os.path.join(args.bundlepath, bundleref)
_result(bundle_vars[bundleref]["envvar"],
fn.replace(os.path.dirname(fn), bundle_vars[bundleref]["altpath"])
if bundle_vars[bundleref]["altpath"] else fn)
for keyvar in args.key_envvars:
if ":" in keyvar:
envvar, altpath = keyvar.split(":", 1)
else:
envvar, altpath = keyvar, None
_result(envvar, args.keyfile.replace(os.path.dirname(args.keyfile), altpath) if altpath else args.keyfile)
if args.output:
out_target.close()
_out("*** Done.")
if __name__ == "__main__":
main()
| mpl-2.0 | -4,284,949,911,080,452,000 | 55.842282 | 120 | 0.591977 | false |
naver/hubblemon | psutil_mon/settings.py | 1 | 1388 |
#
# Hubblemon - Yet another general purpose system monitor
#
# Copyright 2015 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# alarm settings
#
# absolute
"""
# example
alarm_conf_absolute = {
'*:net-*':{ # any machine, any net if
'bytes_recv':(50000000, 60000000, 90000000),
},
	# ex) for a specific machine or machine group
'sys01.db:resource':{
'retransmit':(50, 200, 800),
},
'sys??.db:resource':{
'retransmit':(1, 1, 1),
},
}
"""
alarm_conf_absolute = {
'*:net-*':{
'bytes_recv':(90000000, 100000000, 110000000),
'bytes_sent':(90000000, 100000000, 110000000),
},
'*:resource': {
'retransmit':(5, 20, 80),
},
}
# ratio
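# Each key maps a check lambda(values, limit) -> (triggered, message) to a
# 3-tuple of per-level limits (None presumably disables that alarm level).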
alarm_conf_lambda = {
'*:memory':{
lambda x, limit: (x['used'] / x['total'] > limit, 'ratio of used/total(%f) exceeds %f' % (x['used'] / x['total'], limit)) : (0.95, None, None),
},
}
| apache-2.0 | -3,339,088,020,987,683,300 | 21.031746 | 146 | 0.644092 | false |
vpadillar/pventa | venta/migrations/0026_auto_20160407_1907.py | 1 | 13004 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-07 19:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('venta', '0025_auto_20160216_1924'),
]
operations = [
migrations.AlterModelOptions(
name='bill',
options={'verbose_name': 'Factura', 'verbose_name_plural': 'Facturas'},
),
migrations.AlterModelOptions(
name='category',
options={'verbose_name': 'Categoria', 'verbose_name_plural': 'Categorias'},
),
migrations.AlterModelOptions(
name='client',
options={'verbose_name': 'Cliente', 'verbose_name_plural': 'Clientes'},
),
migrations.AlterModelOptions(
name='config',
options={'verbose_name': 'Configuracion', 'verbose_name_plural': 'Tu configuracion'},
),
migrations.AlterModelOptions(
name='image',
options={'verbose_name': 'Imagen', 'verbose_name_plural': 'Imagenes'},
),
migrations.AlterModelOptions(
name='itemorder',
options={'verbose_name': 'Item de Orden', 'verbose_name_plural': 'Items de Orden'},
),
migrations.AlterModelOptions(
name='order',
options={'verbose_name': 'Orden', 'verbose_name_plural': 'Ordenes'},
),
migrations.AlterModelOptions(
name='presentation',
options={'verbose_name': 'Presentaci\xf3n', 'verbose_name_plural': 'Presentaciones'},
),
migrations.AlterModelOptions(
name='product',
options={'verbose_name': 'Producto', 'verbose_name_plural': 'Productos'},
),
migrations.AlterModelOptions(
name='service',
options={'verbose_name': 'Servicio', 'verbose_name_plural': 'Servicios'},
),
migrations.AlterModelOptions(
name='userservice',
options={'verbose_name': 'Usuario a Servicio', 'verbose_name_plural': 'Usuarios a Servicio'},
),
migrations.AddField(
model_name='bill',
name='ipoconsumo',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='IpoConsumo'),
preserve_default=False,
),
migrations.AddField(
model_name='bill',
name='iva',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='IVA'),
preserve_default=False,
),
migrations.AddField(
model_name='bill',
name='subtotal',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Sub Total'),
preserve_default=False,
),
migrations.AddField(
model_name='config',
name='iva',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='order',
name='paid',
field=models.BooleanField(default=False, verbose_name='Pagado'),
),
migrations.AddField(
model_name='service',
name='moviles',
field=models.TextField(default=' '),
preserve_default=False,
),
migrations.AddField(
model_name='service',
name='printer',
field=models.CharField(default=' ', max_length=100),
preserve_default=False,
),
migrations.AlterField(
model_name='bill',
name='card',
field=models.CharField(default='0', max_length=45, verbose_name='Tarjeta'),
),
migrations.AlterField(
model_name='bill',
name='cash',
field=models.CharField(max_length=45, verbose_name='Efectivo'),
),
migrations.AlterField(
model_name='bill',
name='casher',
field=models.CharField(max_length=45, verbose_name='Cajero'),
),
migrations.AlterField(
model_name='bill',
name='cc',
field=models.CharField(default='', max_length=45, verbose_name='Numero de c\xe9dula'),
),
migrations.AlterField(
model_name='bill',
name='check',
field=models.CharField(default='0', max_length=45, verbose_name='Cheque'),
),
migrations.AlterField(
model_name='bill',
name='date',
field=models.DateTimeField(auto_now_add=True, verbose_name='Fecha'),
),
migrations.AlterField(
model_name='bill',
name='disscount',
field=models.CharField(default='0', max_length=45, verbose_name='Descuento'),
),
migrations.AlterField(
model_name='bill',
name='name',
field=models.CharField(default='', max_length=45, verbose_name='Nombre'),
),
migrations.AlterField(
model_name='bill',
name='paid',
field=models.BooleanField(default=True, verbose_name='Pagado'),
),
migrations.AlterField(
model_name='bill',
name='products',
field=models.TextField(verbose_name='Productos'),
),
migrations.AlterField(
model_name='bill',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venta.Service', verbose_name='Servicio'),
),
migrations.AlterField(
model_name='bill',
name='tel',
field=models.CharField(default='', max_length=45, verbose_name='Tel\xe9fono'),
),
migrations.AlterField(
model_name='bill',
name='tip',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Propina'),
),
migrations.AlterField(
model_name='bill',
name='total',
field=models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Total'),
),
migrations.AlterField(
model_name='bill',
name='totaltip',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Total + Propina'),
),
migrations.AlterField(
model_name='bill',
name='waiter',
field=models.CharField(max_length=45, verbose_name='Mesero'),
),
migrations.AlterField(
model_name='category',
name='image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='venta.Image', verbose_name='Imagen'),
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=45, verbose_name='Nombre'),
),
migrations.AlterField(
model_name='category',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venta.Service', verbose_name='Servicio'),
),
migrations.AlterField(
model_name='client',
name='cc',
field=models.IntegerField(unique=True, verbose_name='Numero de c\xe9dula'),
),
migrations.AlterField(
model_name='client',
name='email',
field=models.EmailField(default='', max_length=254, verbose_name='Correo'),
),
migrations.AlterField(
model_name='client',
name='name',
field=models.CharField(max_length=45, verbose_name='Nombre'),
),
migrations.AlterField(
model_name='client',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venta.Service', verbose_name='Servicio'),
),
migrations.AlterField(
model_name='client',
name='tel',
field=models.CharField(max_length=20, verbose_name='Tel\xe9fono'),
),
migrations.AlterField(
model_name='config',
name='ipoconsumo',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='config',
name='propina',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='image',
name='name',
field=models.CharField(max_length=45, verbose_name='Nombre'),
),
migrations.AlterField(
model_name='image',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venta.Service', verbose_name='Servicio'),
),
migrations.AlterField(
model_name='image',
name='url',
field=models.ImageField(upload_to='category_images/', verbose_name='Ruta de la imagen'),
),
migrations.AlterField(
model_name='itemorder',
name='count',
field=models.IntegerField(verbose_name='Cantidad'),
),
migrations.AlterField(
model_name='itemorder',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venta.Product', verbose_name='Producto'),
),
migrations.AlterField(
model_name='order',
name='bill',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='venta.Bill', verbose_name='Factura'),
),
migrations.AlterField(
model_name='order',
name='canceled',
field=models.BooleanField(default=False, verbose_name='Cancelado'),
),
migrations.AlterField(
model_name='order',
name='client',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='venta.Client', verbose_name='Cliente'),
),
migrations.AlterField(
model_name='order',
name='date',
field=models.DateTimeField(auto_now_add=True, verbose_name='Fecha'),
),
migrations.AlterField(
model_name='order',
name='products',
field=models.ManyToManyField(to='venta.ItemOrder', verbose_name='Productos'),
),
migrations.AlterField(
model_name='order',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venta.Service', verbose_name='Servicio'),
),
migrations.AlterField(
model_name='presentation',
name='name',
field=models.CharField(max_length=45, verbose_name='Nombre'),
),
migrations.AlterField(
model_name='presentation',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venta.Service', verbose_name='Servicio'),
),
migrations.AlterField(
model_name='product',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venta.Category', verbose_name='Categor\xeda'),
),
migrations.AlterField(
model_name='product',
name='name',
field=models.CharField(max_length=45, verbose_name='Nombre'),
),
migrations.AlterField(
model_name='product',
name='presentation',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='venta.Presentation', verbose_name='Presentaci\xf3n'),
),
migrations.AlterField(
model_name='product',
name='price',
field=models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Precio'),
),
migrations.AlterField(
model_name='service',
name='code',
field=models.CharField(db_index=True, max_length=45, unique=True, verbose_name='Codigo'),
),
migrations.AlterField(
model_name='service',
name='name',
field=models.CharField(max_length=45, unique=True, verbose_name='Nombre'),
),
migrations.AlterField(
model_name='userservice',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venta.Service', verbose_name='Servicio'),
),
migrations.AlterField(
model_name='userservice',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuario'),
),
]
| mit | -3,283,579,268,307,782,700 | 37.702381 | 149 | 0.559597 | false |
rekyuu/rpyg | src/objects/tiles.py | 1 | 2154 | # Default datasets for dungeon tiles.
walls = {
'north' : True,
'east' : True,
'south' : True,
'west' : True
}
entities = {
'items' : [],
'objects' : [],
'enemies' : [],
'npcs' : []
}
# Defines a series of tiles with walls.
class Tile (object):
def __init__ (self, walls=walls, entities=entities, text=''):
		# Marks whether the tile has been visited during maze generation.
		self.visited = False
		# Tile wall definitions, taken from a dictionary of booleans.
		self.wall_north = walls['north']
		self.wall_east = walls['east']
		self.wall_south = walls['south']
		self.wall_west = walls['west']
		# Defines if the tile is an entrance or exit.
		self.entrance = False
		self.exit = False
		# Lists of various entities on the tile. Copied so that tiles built
		# from the shared module-level defaults don't alias the same lists.
		self.items = list(entities['items'])
		self.objects = list(entities['objects'])
		self.enemies = list(entities['enemies'])
		self.npcs = list(entities['npcs'])
# Text that displays when the player enters the tile.
self.text = text
# Removes walls during generation.
def remove_wall (self, wall):
if wall == 'north':
self.wall_north = False
elif wall == 'east':
self.wall_east = False
elif wall == 'south':
self.wall_south = False
elif wall == 'west':
self.wall_west = False
# Marks a tile as processed during generation.
def visit (self):
self.visited = True
# Sets the tile as the entrance.
def set_entrance (self):
self.entrance = True
# Sets the tile as the exit.
def set_exit (self):
self.exit = True
# Sets a list of items on the tile.
def set_items (self, items):
self.items = items
# Sets a list of interactable objects on the tile.
def set_objects (self, objects):
self.objects = objects
# Sets a list of enemies on the tile.
def set_enemies (self, enemies):
self.enemies = enemies
# Sets a list of npcs on the tile.
def set_npcs (self, npcs):
self.npcs = npcs
# Text that displays as the player(s) enter the tile.
def enter_text (self):
out = ['You enter a dim corridor.']
if self.exit:
out.append('\nYou find yourself at the exit.')
out = ''.join(out)
return out
def set_text (self, text):
self.text = text
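# Illustrative usage sketch (not part of the original module): carve a
# passage and flag the exit, roughly as a maze generator would.
if __name__ == '__main__':
	tile = Tile()
	tile.remove_wall('north')
	tile.visit()
	tile.set_exit()
	print(tile.enter_text())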
| mit | -8,520,644,150,798,013,000 | 20.117647 | 79 | 0.654596 | false |
ibid/ibid | ibid/source/__init__.py | 1 | 2434 | # Copyright (c) 2008-2010, Michael Gorven, Stefano Rivera
# Released under terms of the MIT/X/Expat Licence. See COPYING for details.
from copy import copy
try:
from twisted.plugin import pluginPackagePaths
except ImportError:
# Not available in Twisted 2.5.0 in Ubuntu hardy
# This is straight from twisted.plugin
import os.path
import sys
def pluginPackagePaths(name):
package = name.split('.')
return [os.path.abspath(os.path.join(x, *package)) for x in sys.path
if not os.path.exists(os.path.join(x, *package + ['__init__.py']))]
__path__ = pluginPackagePaths(__name__) + __path__
class IbidSourceFactory(object):
supports = ()
auth = ()
permissions = ()
def __new__(cls, *args):
cls.type = cls.__module__.split('.')[2]
for name, option in options.items():
new = copy(option)
default = getattr(cls, name)
new.default = default
setattr(cls, name, new)
return super(IbidSourceFactory, cls).__new__(cls, *args)
def __init__(self, name):
self.name = name
self.setup()
def setup(self):
"Apply configuration. Called on every config reload"
pass
def setServiceParent(self, service):
"Start the source and connect"
raise NotImplementedError
def connect(self):
"Connect (if disconncted)"
return self.setServiceParent(None)
def disconnect(self):
"Disconnect source"
raise NotImplementedError
def url(self):
"Return a URL describing the source"
return None
def logging_name(self, identity):
"Given an identity or connection, return a name suitable for logging"
return identity
def truncation_point(self, response, event=None):
"""Given a target, and possibly a related event, return the number of
bytes to clip at, or None to indicate that a complete message will
be delivered.
"""
if (event is not None
and response.get('target', None) == event.get('channel', None)
and event.get('public', True)):
return 490
return None
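# Note: 490 bytes presumably leaves headroom below the common 512-byte IRC
# line limit for the server-added prefix and trailing CRLF; replies sent
# privately or to another channel are returned uncut. (Inference from the
# code above, not documented in the source.)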
from ibid.config import Option
options = {
'auth': Option('auth', 'Authentication methods to allow'),
'permissions': Option('permissions', 'Permissions granted to users on this source')
}
# vi: set et sta sw=4 ts=4:
| gpl-3.0 | 6,954,459,534,629,569,000 | 28.325301 | 87 | 0.618324 | false |
tfeldmann/tryagain | test_tryagain.py | 1 | 8433 | import mock
import pytest
import logging
import tryagain
import functools
class Namespace:
pass
def _return_true():
return True
def _raise_exception():
raise Exception()
def test_call_once():
assert tryagain.call(_return_true) is True
def test_call_twice():
assert tryagain.call(_return_true, max_attempts=2) is True
def test_raise_after_retry():
with pytest.raises(Exception):
tryagain.call(_raise_exception, max_attempts=2)
def test_wait_time():
def works_on_second_try():
if ns.count == 0:
ns.count = 1
raise ValueError
return True
ns = Namespace()
ns.count = 0
with mock.patch('time.sleep') as mock_sleep:
assert tryagain.call(works_on_second_try, wait=1.2) is True
mock_sleep.assert_called_once_with(1.2)
def test_custom_wait_function():
def mywait(attempt):
ns.counter = attempt
return 0
ns = Namespace()
ns.counter = 0
with pytest.raises(Exception):
tryagain.call(_raise_exception, wait=mywait, max_attempts=2)
assert ns.counter == 1
def test_repeat():
assert (
list(tryagain._repeat('x', times=10)) ==
['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x'])
def test_is_callable():
with pytest.raises(TypeError):
tryagain._assert_callable(None, allow_none=False)
with pytest.raises(TypeError):
tryagain._assert_callable(3, allow_none=True)
assert tryagain._assert_callable(_return_true) is None
assert tryagain._assert_callable(lambda: None) is None
def test_attempts():
with pytest.raises(ValueError):
tryagain.call(_return_true, max_attempts=0)
assert tryagain.call(_return_true, max_attempts=None)
assert tryagain.call(_return_true, max_attempts=1)
def test_full_execution():
ns = Namespace()
actions = []
ns.count = 0
def unstable():
ns.count += 1
if ns.count == 3:
actions.append('success %s' % ns.count)
return 'result %s' % ns.count
else:
actions.append('fail %s' % ns.count)
raise Exception
def cleanup():
actions.append('cleanup %s' % ns.count)
def pre_retry():
actions.append('pre_retry %s' % ns.count)
def wait(attempt):
actions.append('wait %s' % attempt)
return 0
result = tryagain.call(unstable, wait=wait, max_attempts=5,
cleanup_hook=cleanup, pre_retry_hook=pre_retry)
print(actions)
assert actions == [
'fail 1', 'cleanup 1', 'wait 1', 'pre_retry 1',
'fail 2', 'cleanup 2', 'wait 2', 'pre_retry 2',
'success 3']
assert result == 'result 3'
def test_full_execution_decorator():
ns = Namespace()
actions = []
ns.count = 0
def cleanup():
actions.append('cleanup %s' % ns.count)
def pre_retry():
actions.append('pre_retry %s' % ns.count)
def wait(attempt):
actions.append('wait %s' % attempt)
return 0
@tryagain.retries(wait=wait, max_attempts=5,
cleanup_hook=cleanup, pre_retry_hook=pre_retry)
def unstable():
ns.count += 1
if ns.count == 3:
actions.append('success %s' % ns.count)
return 'result %s' % ns.count
else:
actions.append('fail %s' % ns.count)
raise Exception
result = unstable()
print(actions)
assert actions == [
'fail 1', 'cleanup 1', 'wait 1', 'pre_retry 1',
'fail 2', 'cleanup 2', 'wait 2', 'pre_retry 2',
'success 3']
assert result == 'result 3'
class reprwrapper(object):
def __init__(self, repr, func):
self._repr = repr
self._func = func
functools.update_wrapper(self, func)
def __call__(self, *args, **kw):
return self._func(*args, **kw)
def __repr__(self):
return self._repr
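# reprwrapper pins the wrapped function's repr() to a fixed string so the
# log-message assertions in the tests below can match on a stable name.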
def test_logging():
ns = Namespace()
ns.count = 0
def unstable():
ns.count += 1
if ns.count == 2:
return True
else:
raise Exception('Exception message')
wrapped_unstable = reprwrapper('unstable', unstable)
logger = logging.getLogger('tryagain')
with mock.patch.object(logger, 'debug') as mock_debug:
assert tryagain.call(wrapped_unstable) is True
mock_debug.assert_called_once_with(
'Attempt 1 at calling unstable failed (Exception message)')
def test_logging_limited_attempts():
ns = Namespace()
ns.count = 0
def unstable():
ns.count += 1
if ns.count == 2:
return True
else:
raise Exception('Exception message')
wrapped_unstable = reprwrapper('unstable', unstable)
logger = logging.getLogger('tryagain')
with mock.patch.object(logger, 'debug') as mock_debug:
assert tryagain.call(wrapped_unstable, max_attempts=5) is True
mock_debug.assert_called_once_with(
'Attempt 1 / 5 at calling unstable failed (Exception message)')
def test_decorator():
ns = Namespace()
ns.count = 0
@tryagain.retries()
def unstable():
ns.count += 1
if ns.count == 2:
return True
else:
raise Exception('Exception message')
assert tryagain.call(unstable)
def test_decorator_with_parameters():
ns = Namespace()
ns.count = 0
@tryagain.retries(max_attempts=5)
def unstable():
ns.count += 1
if ns.count == 2:
return True
else:
raise Exception('Exception message')
assert tryagain.call(unstable)
def test_decorator_in_class():
class MyClass:
def __init__(self):
self.count = 0
@tryagain.retries(max_attempts=5)
def unstable(self, pass_on_count):
self.count += 1
if self.count == pass_on_count:
return True
else:
raise Exception('Exception message')
with pytest.raises(Exception):
c1 = MyClass()
c1.unstable(pass_on_count=10)
c2 = MyClass()
assert c2.unstable(pass_on_count=2) is True
def test_decorator_fails():
ns = Namespace()
ns.count = 0
@tryagain.retries(max_attempts=5)
def unstable(pass_on_count=2):
ns.count += 1
if ns.count == pass_on_count:
return True
else:
raise Exception('Exception message')
with pytest.raises(Exception):
unstable(pass_on_count=10)
ns.count = 0
assert unstable(pass_on_count=2) is True
def test_unexpected_exception():
@tryagain.retries(max_attempts=5, exceptions=(TypeError, ValueError))
def unstable():
ns.count += 1
raise EnvironmentError()
ns = Namespace()
ns.count = 0
with pytest.raises(EnvironmentError):
unstable()
assert ns.count == 1
def test_multiple_exceptions():
@tryagain.retries(exceptions=(ValueError, OSError))
def unstable(pass_on_count=2):
ns.count += 1
if ns.count == 1:
raise OSError
elif ns.count < pass_on_count:
raise ValueError
else:
return True
ns = Namespace()
ns.count = 0
assert unstable(pass_on_count=5) is True
def test_exception_in_wait_function():
def wait(attempt):
raise ValueError('Exception in wait function')
with pytest.raises(ValueError):
tryagain.call(_raise_exception, wait=wait)
def test_exception_in_cleanup_hook():
def cleanup():
raise ValueError('Exception in cleanup')
with pytest.raises(ValueError):
tryagain.call(_raise_exception, cleanup_hook=cleanup)
def test_exception_in_pre_retry_hook():
def pre_retry():
raise ValueError('Exception in pre_retry hook')
with pytest.raises(ValueError):
tryagain.call(_raise_exception, pre_retry_hook=pre_retry)
def test_callable_hooks():
def wait():
# parameter 'attempt' is missing
pass
def pre_retry(too, many, arguments):
pass
def cleanup(too, many, arguments):
pass
with pytest.raises(TypeError):
tryagain.call(_raise_exception, wait=wait)
with pytest.raises(TypeError):
tryagain.call(_raise_exception, pre_retry_hook=pre_retry)
with pytest.raises(TypeError):
tryagain.call(_raise_exception, cleanup_hook=cleanup)
| mit | 3,310,043,868,446,940,000 | 22.957386 | 75 | 0.59362 | false |
thiagopa/planyourexchange-server | restserver/auto_complete.py | 1 | 1351 | """
Copyright (C) 2015, Thiago Pagonha,
Plan Your Exchange, easy exchange to fit your budget
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import autocomplete_light
from restserver import geo_airports
class AirPortAutoComplete(autocomplete_light.AutocompleteBase):
    """
    Airport names auto complete
    """
    choices = geo_airports.keys()
def choices_for_request(self):
"""
Return the list of choices that are available. Uses :py:attr:`request`
if set, this method is used by
:py:meth:`~.base.AutocompleteBase.autocomplete_html`.
"""
q = self.request.GET.get('q', '')
return [k for k in self.choices if k.startswith(q)]
| agpl-3.0 | -1,966,798,133,976,847,000 | 36.527778 | 78 | 0.689859 | false |
paulcwatts/django-auth-utils | auth_utils/utils.py | 1 | 1104 | from django.contrib.auth.models import User
from django.conf import settings
def is_allowed_username(username):
disallowed = getattr(settings, 'AUTH_DISALLOWED_USERNAMES', [])
return username.lower() not in disallowed
def get_username(basename):
disallowed = getattr(settings, 'AUTH_DISALLOWED_USERNAMES', [])
# Truncate the basename to 27 characters
# (The username is only 30 characters)
basename = basename[:27]
if basename.lower() not in disallowed:
try:
# First just try their username
User.objects.get(username__iexact=basename)
except User.DoesNotExist:
return basename
i = 0
while True:
try:
username = basename + str(i)
if username.lower() not in disallowed:
User.objects.get(username__iexact=username)
i = i + 1
except User.DoesNotExist:
return username
def email_to_username(email):
# Generate a unique username from the email address
basename = email.split('@')[0].lower()
return get_username(basename)
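# Illustrative behaviour (assumed data, not a real fixture): with no
# clashing users, email_to_username('[email protected]') returns
# 'jane.doe'; if that name is taken or disallowed, get_username falls back
# to 'jane.doe0', 'jane.doe1', ... until a free, allowed name is found.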
| bsd-3-clause | 8,254,636,861,293,090,000 | 28.052632 | 67 | 0.641304 | false |
tdsticks/crontab | py/wiki20/wiki20/model/__init__.py | 1 | 2437 | # -*- coding: utf-8 -*-
"""The application's model objects"""
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
maker = sessionmaker(autoflush=True, autocommit=False,
extension=ZopeTransactionExtension())
DBSession = scoped_session(maker)
# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()
# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)
# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata
# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
# from sqlalchemy import MetaData
# metadata2 = MetaData()
#####
# Generally you will not want to define your table's mappers, and data objects
# here in __init__ but will want to create modules them in the model directory
# and import them at the bottom of this file.
######
def init_model(engine):
"""Call me before using any of the tables or classes in the model."""
DBSession.configure(bind=engine)
# If you are using reflection to introspect your database and create
# table objects for you, your tables must be defined and mapped inside
# the init_model function, so that the engine is available if you
# use the model outside tg2, you need to make sure this is called before
# you use the model.
#
# See the following example:
#
# global t_reflected
# t_reflected = Table("Reflected", metadata,
# autoload=True, autoload_with=engine)
# mapper(Reflected, t_reflected)
# Import your model modules here.
from wiki20.model.auth import User, Group, Permission
from wiki20.model.page import Page
__all__ = ('User', 'Group', 'Permission')
| gpl-2.0 | -153,422,786,090,273,340 | 37.68254 | 78 | 0.736972 | false |
qsnake/qsnake | spkg/base/qsnake_run.py | 1 | 27165 | #! /usr/bin/env python
import os
import sys
from time import sleep
from glob import glob
from os.path import expandvars
from optparse import OptionParser
import tempfile
import subprocess
import time
import urllib2
import json
version = "0.9.12"
release_date = "May 7, 2011"
class CmdException(Exception):
pass
class PackageBuildFailed(Exception):
pass
class PackageNotFound(Exception):
pass
def main():
systemwide_python = (os.environ["QSNAKE_SYSTEMWIDE_PYTHON"] == "yes")
if systemwide_python:
print """\
***************************************************
Qsnake is not installed. Running systemwide Python.
Only use this mode to install Qsnake.
***************************************************"""
parser = OptionParser(usage="""\
[options] [commands]
Commands:
update Updates the downloaded packages
install PACKAGE Installs the package 'PACKAGE'
list Lists all installed packages
test Runs the Qsnake testsuite
develop Equivalent of 'setup.py develop'""")
parser.add_option("--version",
action="store_true", dest="version",
default=False, help="print Qsnake version and exit")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose",
default=False, help="Make Qsnake verbose")
parser.add_option("-i", "--install",
action="store", type="str", dest="install", metavar="PACKAGE",
default="", help="install a spkg package")
parser.add_option("-f", "--force",
action="store_true", dest="force",
default=False, help="force the installation")
parser.add_option("-d", "--download_packages",
action="store_true", dest="download",
default=False, help="download standard spkg packages")
parser.add_option("-b", "--build",
action="store_true", dest="build",
default=False, help="build Qsnake")
parser.add_option("-j",
action="store", type="int", dest="cpu_count", metavar="NCPU",
default=0, help="number of cpu to use (0 = all), default 0")
parser.add_option("-s", "--shell",
action="store_true", dest="shell",
default=False, help="starts a Qsnake shell")
parser.add_option("--script",
action="store", type="str", dest="script", metavar="SCRIPT",
default=None, help="runs '/bin/bash SCRIPT' in a Qsnake shell")
# Not much used:
#parser.add_option("--python",
# action="store", type="str", dest="python", metavar="SCRIPT",
# default=None, help="runs 'python SCRIPT' in a Qsnake shell")
# These are not used either:
#parser.add_option("--unpack",
# action="store", type="str", dest="unpack", metavar="PACKAGE",
# default=None, help="unpacks the PACKAGE into the 'devel/' dir")
#parser.add_option("--pack",
# action="store", type="str", dest="pack", metavar="PACKAGE",
# default=None, help="creates 'devel/PACKAGE.spkg' from 'devel/PACKAGE'")
#parser.add_option("--devel-install",
# action="store", type="str", dest="devel_install", metavar="PACKAGE",
# default=None, help="installs 'devel/PACKAGE' into Qsnake directly")
parser.add_option("--create-package",
action="store", type="str", dest="create_package",
metavar="PACKAGE", default=None,
help="creates 'PACKAGE.spkg' in the current directory using the official git repository sources")
parser.add_option("--upload-package",
action="store", type="str", dest="upload_package",
metavar="PACKAGE", default=None,
help="upload 'PACKAGE.spkg' from the current directory to the server (for Qsnake developers only)")
parser.add_option("--release-binary",
action="store_true", dest="release_binary",
default=False, help="creates a binary release using the current state (for Qsnake developers only)")
parser.add_option("--lab",
action="store_true", dest="run_lab",
default=False, help="runs lab()")
parser.add_option("--verify-database",
action="store_true", dest="verify_database",
default=False,
help="verifies the package database integrity")
parser.add_option("--erase-binary",
action="store_true", dest="erase_binary",
default=False,
help="erases all binaries (keeps downloads)")
options, args = parser.parse_args()
if options.verbose:
global global_cmd_echo
global_cmd_echo = True
if len(args) == 1:
arg, = args
if arg == "update":
command_update()
return
elif arg == "list":
command_list()
return
elif arg == "develop":
command_develop()
return
elif arg == "test":
run_tests()
return
print "Unknown command"
sys.exit(1)
elif len(args) == 2:
arg1, arg2 = args
if arg1 == "install":
try:
install_package(arg2, cpu_count=options.cpu_count,
force_install=options.force)
except PackageBuildFailed:
print
print "Package build failed."
return
print "Unknown command"
sys.exit(1)
elif len(args) == 0:
pass
else:
print "Too many arguments"
sys.exit(1)
if options.download:
download_packages()
return
if options.install:
try:
install_package(options.install, cpu_count=options.cpu_count,
force_install=options.force)
except PackageBuildFailed:
pass
return
if options.build:
build(cpu_count=options.cpu_count)
return
if options.shell:
print "Type CTRL-D to exit the Qsnake shell."
cmd("cd $CUR; /bin/bash --rcfile $QSNAKE_ROOT/spkg/base/qsnake-shell-rc")
return
if options.script:
setup_cpu(options.cpu_count)
try:
cmd("cd $CUR; /bin/bash " + options.script)
except CmdException:
print "Qsnake script exited with an error."
return
#if options.python:
# cmd("cd $CUR; /usr/bin/env python " + options.python)
# return
#if options.unpack:
# pkg = pkg_make_absolute(options.unpack)
# print "Unpacking '%(pkg)s' into 'devel/'" % {"pkg": pkg}
# cmd("mkdir -p $QSNAKE_ROOT/devel")
# cmd("cd $QSNAKE_ROOT/devel; tar xjf %s" % pkg)
# return
#if options.pack:
# dir = options.pack
# if not os.path.exists(dir):
# dir = expandvars("$QSNAKE_ROOT/devel/%s" % dir)
# if not os.path.exists(dir):
# raise Exception("Unknown package to pack")
# dir = os.path.split(dir)[1]
# print "Creating devel/%(dir)s.spkg from devel/%(dir)s" % {"dir": dir}
# cmd("cd $QSNAKE_ROOT/devel; tar cjf %(dir)s.spkg %(dir)s" % \
# {"dir": dir})
# return
#if options.devel_install:
# dir = options.devel_install
# if not os.path.exists(dir):
# dir = expandvars("$QSNAKE_ROOT/devel/%s" % dir)
# if not os.path.exists(dir):
# raise Exception("Unknown package to pack")
# dir = os.path.normpath(dir)
# dir = os.path.split(dir)[1]
# print "Installing devel/%(dir)s into Qsnake" % {"dir": dir}
# cmd("mkdir -p $QSNAKE_ROOT/spkg/build/")
# cmd("rm -rf $QSNAKE_ROOT/spkg/build/%(dir)s" % {"dir": dir})
# cmd("cp -r $QSNAKE_ROOT/devel/%(dir)s $QSNAKE_ROOT/spkg/build/" % \
# {"dir": dir})
# setup_cpu(options.cpu_count)
# cmd("cd $QSNAKE_ROOT/spkg/build/%(dir)s; /bin/bash spkg-install" % \
# {"dir": dir})
# cmd("rm -rf $QSNAKE_ROOT/spkg/build/%(dir)s" % {"dir": dir})
# return
if options.create_package:
create_package(options.create_package)
return
if options.upload_package:
upload_package(options.upload_package)
return
if options.release_binary:
release_binary()
return
if options.run_lab:
run_lab()
return
if options.verify_database:
verify_database()
return
if options.erase_binary:
erase_binary()
return
if options.version:
show_version()
return
if systemwide_python:
parser.print_help()
else:
start_qsnake()
def setup_cpu(cpu_count):
if cpu_count == 0:
try:
import multiprocessing
cpu_count = multiprocessing.cpu_count() + 1
except ImportError:
cpu_count = 1
if cpu_count > 1:
os.environ["MAKEFLAGS"] = "-j %d" % cpu_count
# If this variable is True, "cmd" will echo each command. It'd be nice to
# refactor this somehow, so that we don't need this global variable. This
# variable is set to True if the user passes the "-v" switch to qsnake:
global_cmd_echo = False
def cmd(s, capture=False, ok_exit_code_list=None, echo=False):
"""
ok_exit_code_list ... a list of ok exit codes (otherwise cmd() raises an
exception)
"""
if ok_exit_code_list is None:
ok_exit_code_list = [0]
if echo or global_cmd_echo:
print s
s = expandvars(s)
if capture:
p = subprocess.Popen(s, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = p.communicate()[0]
r = p.returncode
else:
output = None
r = os.system(s)
if r not in ok_exit_code_list:
raise CmdException("Command '%s' failed with err=%d." % (s, r))
return output
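# Example (illustrative): cmd("ls $QSNAKE_ROOT", capture=True) expands the
# variable, runs the command through the shell and returns its combined
# stdout/stderr; an exit status outside ok_exit_code_list raises
# CmdException.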
def create_package(package):
git_repo = "http://github.com/qsnake/" + package + ".git"
a = git_repo.rfind("/") + 1
b = git_repo.rfind(".git")
dir_name = git_repo[a:b]
print "Creating a package in the current directory."
print "Package name:", package
print "Git repository:", git_repo
tmp = tempfile.mkdtemp()
print "Using temporary directory:", tmp
cur = cmd("echo $CUR", capture=True).strip()
cmd("cd %s; git clone --depth 1 %s" % (tmp, git_repo))
commit = cmd("cd %s/%s; git rev-parse HEAD" % (tmp, dir_name),
capture=True).strip()
cmd("cd %s/%s; rm -rf .git" % (tmp, dir_name))
sha = commit[:7]
if os.path.exists("%s/%s/spkg-prepare" % (tmp, dir_name)):
print "spkg-prepare found, running it..."
cmd("cd %s/%s; sh spkg-prepare" % (tmp, dir_name))
if os.path.exists("%s/%s/spkg-install" % (tmp, dir_name)):
print "spkg-install file exists, not doing anything"
elif os.path.exists("%s/%s/setup.py" % (tmp, dir_name)):
print "spkg-install file doesn't exist, creating one for setup.py"
f = open("%s/%s/spkg-install" % (tmp, dir_name), "w")
f.write("""
#! /bin/sh
if [ "$SPKG_LOCAL" = "" ]; then
echo "SPKG_LOCAL undefined ... exiting";
echo "Maybe run 'qsnake --shell'?"
exit 1
fi
set -e
python setup.py install
""")
f.close()
else:
raise Exception("spkg-install nor setup.py is present")
new_dir_name = "%s-%s" % (package, sha)
pkg_filename = "%s.spkg" % (new_dir_name)
cmd("cd %s; mv %s %s" % (tmp, dir_name, new_dir_name))
print "Creating the spkg package..."
cmd("cd %s; tar cjf %s %s" % (tmp, pkg_filename, new_dir_name))
cmd("cp %s/%s %s/%s" % (tmp, pkg_filename, cur, pkg_filename))
print
print "Package created: %s" % (pkg_filename)
def upload_package(package):
cmd("cd $CUR; scp %s spilka.math.unr.edu:/var/www3/qsnake.org/packages/qsnake_st/" % (package))
print "Package uploaded: %s" % (package)
def release_binary():
tmp = tempfile.mkdtemp()
qsnake_dir = "qsnake-%s" % version
print "Using temporary directory:", tmp
cur = cmd("echo $CUR", capture=True).strip()
cmd("mkdir %s/%s" % (tmp, qsnake_dir))
print "Copying qsnake into the temporary directory..."
cmd("cd $QSNAKE_ROOT; cp -r * %s/%s/" % (tmp, qsnake_dir))
print "Removing source SPKG packages"
cmd("rm -f %s/%s/spkg/standard/*" % (tmp, qsnake_dir))
print "Creating a binary tarball"
cmd("cd %s; tar czf %s.tar.gz %s" % (tmp, qsnake_dir, qsnake_dir))
cmd("cd $QSNAKE_ROOT; cp %s/%s.tar.gz ." % (tmp, qsnake_dir))
print
print "Package created: %s.tar.gz" % (qsnake_dir)
def show_version():
s = "Qsnake Version %s, Release Date: %s" % (version, release_date)
print s
def start_qsnake(debug=False):
if debug:
print "Loading IPython..."
try:
import IPython
except ImportError:
raise Exception("You need to install 'ipython'")
if debug:
print " Done."
banner_length = 70
l = "| Qsnake Version %s, Release Date: %s" % (version, release_date)
l += " " * (banner_length - len(l) - 1) + "|"
banner = "-" * banner_length + "\n" + l + "\n"
l = "| Type lab() for the GUI."
l += " " * (banner_length - len(l) - 1) + "|"
banner += l + "\n" + "-" * banner_length + "\n"
namespace = {"lab": run_lab}
os.environ["IPYTHONDIR"] = expandvars("$DOT_SAGE/ipython")
os.environ["IPYTHONRC"] = "ipythonrc"
if not os.path.exists(os.environ["IPYTHONRC"]):
cmd('mkdir -p "$DOT_SAGE"')
cmd('cp -r "$QSNAKE_ROOT/spkg/base/ipython" "$DOT_SAGE/"')
os.environ["MPLCONFIGDIR"] = expandvars("$DOT_SAGE/matplotlib")
if not os.path.exists(os.environ["MPLCONFIGDIR"]):
cmd('cp -r "$QSNAKE_ROOT/spkg/base/matplotlib" "$DOT_SAGE/"')
if debug:
print "Starting the main loop..."
c = IPython.config.loader.Config()
c.InteractiveShell.confirm_exit = False
IPython.frontend.terminal.embed.InteractiveShellEmbed(config=c,
user_ns=namespace, banner1=banner).mainloop(local_ns={})
def download_packages():
print "Downloading standard spkg packages"
cmd("mkdir -p $QSNAKE_ROOT/spkg/standard")
spkg, git, provided = get_standard_packages()
for p in spkg:
cmd("cd $QSNAKE_ROOT/spkg/standard; ../base/qsnake-wget %s" % p)
for p in git:
# Obtain the latest hash from github:
url = "https://api.github.com/repos/qsnake/%s/branches"
try:
data = urllib2.urlopen(url % p).read()
except urllib2.HTTPError:
print "Can't open the url:", url % p
raise
data = json.loads(data)
i = 0
while data[i]["name"] != "master": i += 1
commit = data[i]["commit"]["sha"]
sha = commit[:7]
path = "$QSNAKE_ROOT/spkg/standard/%s-%s.spkg" % (p, sha)
# If we already have this hash, do nothing, otherwise update the
# package:
if os.path.exists(expandvars(path)):
print "Package '%s' (%s) is current, not updating." % (p, sha)
else:
cmd("rm -f $QSNAKE_ROOT/spkg/standard/%s-*.spkg" % p)
cmd("cd $QSNAKE_ROOT/spkg/standard; ../../qsnake --create-package %s" % p)
print "\n"
def install_package_spkg(pkg):
print "Installing %s..." % pkg
name, version = extract_name_version_from_path(pkg)
cmd("mkdir -p $QSNAKE_ROOT/spkg/build")
cmd("mkdir -p $QSNAKE_ROOT/spkg/installed")
# Remove the possible old builddir
cmd("cd $QSNAKE_ROOT/spkg/build; rm -rf %s-%s" % (name, version))
try:
cmd("cd $QSNAKE_ROOT/spkg/build; tar xjf %s" % pkg)
except CmdException:
print "Not a bz2 archive, trying gzip..."
try:
cmd("cd $QSNAKE_ROOT/spkg/build; tar xzf %s" % pkg)
except CmdException:
print "Not a bz2 nor gzip archive, trying tar..."
cmd("cd $QSNAKE_ROOT/spkg/build; tar xf %s" % pkg)
cmd("cd $QSNAKE_ROOT/spkg/build/%s-%s; chmod +x spkg-install" % (name, version))
try:
cmd("cd $QSNAKE_ROOT/spkg/build/%s-%s; . $QSNAKE_ROOT/local/bin/qsnake-env; ./spkg-install" % (name, version))
except CmdException:
raise PackageBuildFailed()
cmd("cd $QSNAKE_ROOT/spkg/build; rm -rf %s-%s" % (name, version))
def install_package(pkg, install_dependencies=True, force_install=False,
cpu_count=0):
"""
Installs the package "pkg".
"pkg" can be either a full path, or just the name of the package (with or
without a version).
"install_dependencies" ... if True, it will also install all dependencies
"force_install" ... if True, it will install the package even if it has
been already installed
"cpu_count" ... number of processors to use (0 means the number of
processors in the machine)
Examples:
>>> install_package("http://qsnake.org/stpack/python-2.6.4.p9.spkg")
>>> install_package("spkg/standard/readline-6.0.spkg")
>>> install_package("readline-6.0.spkg")
>>> install_package("readline")
"""
if pkg.startswith("http") or pkg.startswith("www"):
# Download from the web:
remote = True
import tempfile
tmpdir = tempfile.mkdtemp()
cmd("wget --directory-prefix=" + tmpdir + " " + pkg)
pkg_name = os.path.split(pkg)
pkg = os.path.join(tmpdir,pkg_name[1])
elif pkg == ".":
# Install from the current directory, try to guess
# how to install it properly:
if os.path.exists(expandvars("$CUR/spkg-install")):
setup_cpu(cpu_count)
try:
cmd("cd $CUR; /bin/bash spkg-install")
except CmdException:
print "Qsnake 'install .' exited with an error."
elif os.path.exists(expandvars("$CUR/setup.py")):
try:
cmd("cd $CUR; python setup.py install")
except CmdException:
print "Qsnake 'python setup.py install' exited with an error."
else:
print "Don't know how to install from the current directory."
return
else:
# Install the 'pkg' package
remote = False
try:
pkg = pkg_make_absolute(pkg)
except PackageNotFound, p:
print p
sys.exit(1)
if is_installed(pkg):
if not force_install:
print "Package '%s' is already installed" % pkg_make_relative(pkg)
return
if install_dependencies:
print "Installing dependencies for %s..." % pkg
for dep in get_dependencies(pkg):
install_package(dep, install_dependencies=False,
cpu_count=cpu_count)
qsnake_scripts = ["qsnake-env"]
setup_cpu(cpu_count)
# Create the standard POSIX directories:
for d in ["bin", "doc", "include", "lib", "man", "share"]:
cmd("mkdir -p $QSNAKE_ROOT/local/%s" % d)
for script in qsnake_scripts:
cmd("cp $QSNAKE_ROOT/spkg/base/%s $QSNAKE_ROOT/local/bin/" % script)
install_package_spkg(pkg)
cmd("touch $QSNAKE_ROOT/spkg/installed/%s" % pkg_make_relative(pkg))
print
print "Package '%s' installed." % pkg_make_relative(pkg)
if remote:
from shutil import rmtree
rmtree(tmpdir)
def is_installed(pkg):
if pkg in get_system_packages():
return True
pkg = pkg_make_relative(pkg)
candidates = glob(expandvars("$QSNAKE_ROOT/spkg/installed/%s" % pkg))
if len(candidates) == 1:
return True
elif len(candidates) == 0:
return False
else:
raise Exception("Internal error: got more candidates in is_installed")
def pkg_make_absolute(pkg):
if pkg.endswith(".spkg"):
if os.path.exists(pkg):
return os.path.abspath(pkg)
pkg_current = expandvars("$CUR/%s" % pkg)
if os.path.exists(pkg_current):
return pkg_current
raise PackageNotFound("Package '%s' not found in the current directory" % pkg)
candidates = glob(expandvars("$QSNAKE_ROOT/spkg/standard/*.spkg"))
if len(candidates) == 0:
raise PackageNotFound("Package '%s' not found" % pkg)
cands = []
for p in candidates:
name, version = extract_name_version_from_path(p)
if name == pkg:
return p
if pkg in name:
cands.append(p)
if len(cands) == 0:
raise PackageNotFound("Package '%s' not found" % pkg)
elif len(cands) == 1:
return cands[0]
print "Too many candidates:"
print " " + "\n ".join(cands)
raise PackageNotFound("Ambiguous package name.")
def pkg_make_relative(pkg):
pkg = pkg_make_absolute(pkg)
name, version = extract_name_version_from_path(pkg)
return name
def make_unique(l):
m = []
for item in l:
if item not in m:
m.append(item)
return m
def get_dependencies(pkg):
"""
Gets all (including indirect) dependencies for the package "pkg".
    The dependency graph is loaded from spkg/base/packages.json via
    get_dependency_graph(); system-provided packages are skipped.
"""
provided = get_system_packages()
if pkg in provided:
return []
pkg_name = pkg_make_relative(pkg)
dependency_graph = get_dependency_graph()
deps = []
for dep in dependency_graph.get(pkg_name, []):
if dep in provided:
continue
deps.extend(get_dependencies(dep))
deps.append(dep)
deps = make_unique(deps)
return deps
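# Example (illustrative entries): with scipy -> [numpy] and
# numpy -> [lapack] declared in packages.json, get_dependencies("scipy")
# yields ["lapack", "numpy"], de-duplicated and minus anything the host
# system already provides.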
def build(cpu_count=0):
print "Building Qsnake"
# Only add the packages that you want to have in Qsnake. Don't add
# dependencies (those are handled in the get_dependencies() function)
packages_list = [
# Basics:
"git",
"libqsnake",
# SciPy stack
"ipython",
"scipy",
"sympy",
"matplotlib",
"h5py",
# PDE packages:
"fipy",
"sfepy",
"phaml",
# Electronic structure packages:
"gpaw",
"elk",
]
try:
for pkg in packages_list:
install_package(pkg, cpu_count=cpu_count)
print
print "Finished building Qsnake."
except PackageBuildFailed:
print
print "Qsnake build failed."
def wait_for_ctrl_c():
try:
while 1:
sleep(1)
except KeyboardInterrupt:
pass
def run_lab():
"""
Runs the html notebook.
"""
print "Starting Web GUI: Open your web browser at http://localhost:8888/"
print "Press CTRL+C to kill it"
print
from IPython.frontend.html.notebook.notebookapp import NotebookApp
app = NotebookApp()
# This option enables Matplotlib:
app.initialize(["--pylab=inline"])
app.start()
def extract_version(package_name):
"""
Extracts the version from the package_name.
The version is defined as one of the following:
-3245s
-ab434
-1.1-343s
-2.3-4
-134-minimal-24
but not:
-ab-13
-ab-ab
-m14-m16
The leading "-" is discarded.
Example:
>>> extract_version("jinja-2.5")
'2.5'
"""
def numeric(c):
if c in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]:
return True
return False
first_dash = package_name.find("-")
last_dash = package_name.rfind("-")
if first_dash == last_dash:
return package_name[first_dash+1:]
while not numeric(package_name[first_dash + 1]):
package_name = package_name[first_dash+1:]
first_dash = package_name.find("-")
last_dash = package_name.rfind("-")
if first_dash == last_dash:
return package_name[first_dash+1:]
return package_name[first_dash + 1:]
def extract_name_version(package_name):
"""
Extracts the name and the version.
Example:
>>> extract_name_version("jinja-2.5")
('jinja', '2.5')
"""
version = extract_version(package_name)
name = package_name[:-len(version)-1]
return name, version
def extract_name_version_from_path(p):
"""
Extracts the name and the version from the full path.
Example:
    >>> extract_name_version_from_path("/home/bla/jinja-2.5.spkg")
('jinja', '2.5')
"""
path, ext = os.path.splitext(p)
assert ext == ".spkg"
directory, filename = os.path.split(path)
return extract_name_version(filename)
def command_update():
print "Updating the git repository"
cmd("cd $QSNAKE_ROOT; git pull http://github.com/qsnake/qsnake.git master")
download_packages()
print "Done."
def command_list():
print "List of installed packages:"
cmd("cd $QSNAKE_ROOT; ls spkg/installed")
def command_develop():
print "Adding the current directory into qsnake.pth file:"
cmd("echo $CUR >> $SPKG_LOCAL/lib/python/site-packages/qsnake.pth",
echo=True)
def get_system_packages():
"""get a dict by platform of packages provided by the system."""
d = {}
d['darwin'] = [
'gnutls',
'openssl',
'termcap',
'zlib',
'bzip2',
'sqlite',
'uuid',
'lapack',
'curl'
]
return d.get(sys.platform, [])
def get_standard_packages():
from json import load
f = open(expandvars("$QSNAKE_ROOT/spkg/base/packages.json"))
data = load(f)
QSNAKE_STANDARD = "http://qsnake.googlecode.com/files"
spkg = []
git = []
provided = get_system_packages()
for p in data:
if p['name'] in provided:
print 'system provided: '+p['name']
continue
download = p["download"]
if download == "qsnake-spkg":
spkg.append(QSNAKE_STANDARD + "/" + p["name"] + "-" + \
p["version"] + ".spkg")
elif download == "qsnake-git":
git.append(p["name"])
else:
raise Exception("Unsupported 'download' field")
return spkg, git, provided
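# Example (illustrative entry): {"name": "sympy", "version": "0.7.1",
# "download": "qsnake-spkg"} ends up in the spkg list as
# http://qsnake.googlecode.com/files/sympy-0.7.1.spkg, while
# "qsnake-git" entries are collected in the git list by name.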
def get_dependency_graph():
from json import load
f = open(expandvars("$QSNAKE_ROOT/spkg/base/packages.json"))
data = load(f)
QSNAKE_STANDARD = "http://qsnake.googlecode.com/files"
graph = {}
for p in data:
graph[p["name"]] = p["dependencies"]
return graph
def verify_database():
print "Verifying the package database..."
try:
packages = get_standard_packages()
dependency_graph = get_dependency_graph()
for p in dependency_graph:
deps = dependency_graph[p]
for p2 in deps:
if not p2 in dependency_graph:
msg = "Dependency '%s' of the package '%s' doesn't exist"
raise Exception(msg % (p2, p))
print "OK"
except:
print "Failed."
print
print "More information about the error:"
raise
def erase_binary():
print "Deleting all installed files..."
cmd("rm -rf $QSNAKE_ROOT/local")
cmd("rm -rf $QSNAKE_ROOT/spkg/build")
cmd("rm -rf $QSNAKE_ROOT/spkg/installed")
print " Done."
def run_tests():
import qsnake
os.environ["MPLCONFIGDIR"] = expandvars("$QSNAKE_ROOT/spkg/base/matplotlib")
qsnake.test()
if __name__ == "__main__":
main()
| bsd-3-clause | 838,599,484,452,875,400 | 32.209046 | 118 | 0.577324 | false |
bparzella/secsgem | secsgem/secs/data_items/sdack.py | 1 | 1685 | #####################################################################
# sdack.py
#
# (c) Copyright 2021, Benjamin Parzella. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#####################################################################
"""SDACK data item."""
from .. import variables
from .base import DataItemBase
class SDACK(DataItemBase):
"""
Map setup acknowledge.
:Types: :class:`Binary <secsgem.secs.variables.Binary>`
:Length: 1
**Values**
+-------+---------------+--------------------------------------------+
| Value | Description | Constant |
+=======+===============+============================================+
| 0 | Received Data | :const:`secsgem.secs.data_items.SDACK.ACK` |
+-------+---------------+--------------------------------------------+
| 1-63 | Error | |
+-------+---------------+--------------------------------------------+
**Used In Function**
- :class:`SecsS12F02 <secsgem.secs.functions.SecsS12F02>`
"""
__type__ = variables.Binary
__count__ = 1
ACK = 0
| lgpl-2.1 | 4,805,233,631,875,200,000 | 36.444444 | 78 | 0.457567 | false |
Sakartu/stringinfo | stringinfo.py | 1 | 1798 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
"""
Usage:
stringinfo [options] [--] [STRING]...
Options:
STRING The strings for which you want information. If none are given, read from stdin upto EOF. Empty strings are ignored.
--list List all plugins, with their descriptions and whether they're default or not
--all Run all plugins, even the ones that aren't default
--verbose Print debugging messages
--file INFILE Read inputs from inputfile, removing trailing newlines. BEWARE: leading/trailing whitespace is preserved!
Plugins:
"""
import colorama
from docopt import docopt
import sys
import veryprettytable
import plugins
from plugins import color
__author__ = 'peter'
def main():
args = docopt(__doc__ + plugins.usage_table())
# Find plugins
ps = plugins.get_plugins(args)
if args['--list']:
table = veryprettytable.VeryPrettyTable()
table.field_names = ('Name', 'Default', 'Description')
table.align = 'l'
for p in ps:
table.add_row((p.__name__,
color(p.default),
p.description))
print(table)
return
if args['--file']:
args['STRING'] = [x.strip('\n\r') for x in open(args['--file'], 'r')]
if not args['STRING']:
args['STRING'] = [sys.stdin.read()]
    args['STRING'] = [s for s in args['STRING'] if s]  # drop empty strings, per the usage text
# Initialize colorama
colorama.init()
# For each plugin, check if it's applicable and if so, run it
for p in ps:
plugin = p(args)
if plugin.sentinel():
print(plugin.header)
print(plugin.handle())
else:
if args['--verbose']:
print('Sentinel failed for {0}'.format(p.__name__))
if __name__ == '__main__':
main() | mit | -4,475,708,929,865,140,700 | 25.850746 | 131 | 0.588432 | false |
Code4SA/mma-dexter | dexter/processing/crawlers/thecitizentz.py | 1 | 1629 | from urlparse import urlparse, urlunparse
import re
from bs4 import BeautifulSoup
import requests
import logging
from .base import BaseCrawler
from ...models import Entity, Author, AuthorType
class TheCitizenTZCrawler(BaseCrawler):
TCTZ = re.compile('(www\.)?thecitizen.co.tz')
log = logging.getLogger(__name__)
def offer(self, url):
""" Can this crawler process this URL? """
parts = urlparse(url)
return bool(self.TCTZ.match(parts.netloc))
def extract(self, doc, raw_html):
""" Extract text and other things from the raw_html for this document. """
super(TheCitizenTZCrawler, self).extract(doc, raw_html)
soup = BeautifulSoup(raw_html)
# gather title
doc.title = self.extract_plaintext(soup.select("article.main.column .story-view header h1"))
#gather publish date
date = self.extract_plaintext(soup.select("article.main.column .story-view header h5"))
doc.published_at = self.parse_timestamp(date)
#gather text and summary
nodes = soup.select("article.main.column .story-view .article .body-copy p")
if len(nodes) > 1:
doc.summary = self.extract_plaintext(nodes[1:2])
doc.text = "\n\n".join(p.text.strip() for p in nodes[1:])
# gather author
        author = self.extract_plaintext(soup.select("article.main.column .story-view .article .author")).replace("By ", '').split('@')[0]
if author:
doc.author = Author.get_or_create(author.strip(), AuthorType.journalist())
else:
doc.author = Author.unknown()
| apache-2.0 | 2,409,094,208,924,812,000 | 34.413043 | 144 | 0.643953 | false |
deapplegate/wtgpipeline | blank.py | 1 | 4506 | def add_correction_new(cat_list,OBJNAME,FILTER,PPRUN):
    import scipy, re, string, os, pyfits  # pyfits is required below for the catalog I/O
''' create chebychev polynomials '''
cheby_x = [{'n':'0x','f':lambda x,y:1.},{'n':'1x','f':lambda x,y:x},{'n':'2x','f':lambda x,y:2*x**2-1},{'n':'3x','f':lambda x,y:4*x**3.-3*x}]
cheby_y = [{'n':'0y','f':lambda x,y:1.},{'n':'1y','f':lambda x,y:y},{'n':'2y','f':lambda x,y:2*y**2-1},{'n':'3y','f':lambda x,y:4*y**3.-3*y}]
cheby_terms = []
cheby_terms_no_linear = []
for tx in cheby_x:
for ty in cheby_y:
if not ((tx['n'] == '0x' and ty['n'] == '0y')): # or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
if not ((tx['n'] == '0x' and ty['n'] == '0y') or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms_no_linear.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
cov = 1
if cov:
samples = [['sdss',cheby_terms,True]] #,['nosdss',cheby_terms_no_linear,False]] #[['nosdss',cheby_terms_no_linear],['sdss',cheby_terms]]
else:
samples = [['nosdss',cheby_terms_no_linear,False]]
sample = 'sdss'
sample_size = 'all'
import re, time
dt = get_a_file(OBJNAME,FILTER,PPRUN)
d = get_fits(OBJNAME,FILTER,PPRUN)
print d.keys()
column_prefix = sample+'$'+sample_size+'$'
position_columns_names = re.split('\,',d[column_prefix + 'positioncolumns'])
print position_columns_names, 'position_columns_names'
fitvars = {}
cheby_terms_dict = {}
print column_prefix, position_columns_names
for ele in position_columns_names:
print ele
if type(ele) != type({}):
ele = {'name':ele}
res = re.split('$',ele['name'])
fitvars[ele['name']] = float(d[sample+'$'+sample_size+'$'+ele['name']])
for term in cheby_terms:
if term['n'] == ele['name'][2:]:
cheby_terms_dict[term['n']] = term
cheby_terms_use = [cheby_terms_dict[k] for k in cheby_terms_dict.keys()]
print cheby_terms_use, fitvars
CHIPS = [int(x) for x in re.split(',',dt['CHIPS'])]
LENGTH1, LENGTH2 = dt['LENGTH1'], dt['LENGTH2']
per_chip = True
coord_conv_x = lambda x:(2.*x-0-LENGTH1)/(LENGTH1-0)
coord_conv_y = lambda x:(2.*x-0-LENGTH2)/(LENGTH2-0)
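    # The conversions above map pixel coordinates in [0, LENGTH] onto
    # [-1, 1], the natural domain of the Chebyshev terms: 0 -> -1,
    # LENGTH1 -> +1 (and likewise for y).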
    ''' make images of illumination corrections '''
    cat_grads = []  # collects (corrected catalog, exposure) pairs; EXPS is assumed to be a module-level {rotation: [exposures]} mapping
for cat in cat_list:
for ROT in EXPS.keys():
for SUPA in EXPS[ROT]:
import re
print SUPA, cat
                res = cat[1].split('$')  # literal '$' separator; re.split('$', ...) would match the end-of-string anchor instead
file = res[1]
print file, cat
if file == SUPA: rotation = ROT
print cat
p = pyfits.open(cat[0])
tab = p["OBJECTS"].data
print tab.field('MAG_AUTO')[0:10]
x = coord_conv_x(tab.field('Xpos_ABS'))
y = coord_conv_y(tab.field('Ypos_ABS'))
CHIPS = tab.field('CHIP')
chip_zps = []
for i in range(len(CHIPS)):
chip_zps.append(float(fitvars['zp_' + str(CHIPS[i])]))
chip_zps = scipy.array(chip_zps)
''' save pattern w/ chip zps '''
trial = False
children = []
            # x and y were already mapped onto [-1, 1] just above; applying
            # the conversion a second time would collapse the coordinates
            # and corrupt the Chebyshev evaluation.
''' correct w/ polynomial '''
epsilonC = 0
index = 0
for term in cheby_terms_use:
index += 1
print index, ROT, term, fitvars[str(ROT)+'$'+term['n']]
epsilonC += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)*term['fy'](x,y)
''' add the zeropoint '''
epsilonC += chip_zps
''' save pattern w/o chip zps '''
            print epsilonC[0:20]
tab.field('MAG_AUTO')[:] = tab.field('MAG_AUTO')[:] - epsilonC
print tab.field('MAG_AUTO')[0:20]
new_name = cat[0].replace('.cat','.gradient.cat')
os.system('rm ' + new_name)
p.writeto(new_name)
cat_grads.append([new_name,cat[1]])
return cat_grads
| mit | 4,700,156,536,708,270,000 | 39.594595 | 146 | 0.458722 | false |
ageitgey/face_recognition | setup.py | 1 | 1703 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'face_recognition_models>=0.3.0',
'Click>=6.0',
'dlib>=19.7',
'numpy',
'Pillow'
]
test_requirements = [
'tox',
'flake8'
]
setup(
name='face_recognition',
version='1.4.0',
description="Recognize faces from Python or from the command line",
long_description=readme + '\n\n' + history,
author="Adam Geitgey",
author_email='[email protected]',
url='https://github.com/ageitgey/face_recognition',
packages=[
'face_recognition',
],
package_dir={'face_recognition': 'face_recognition'},
package_data={
'face_recognition': ['models/*.dat']
},
entry_points={
'console_scripts': [
'face_recognition=face_recognition.face_recognition_cli:main',
'face_detection=face_recognition.face_detection_cli:main'
]
},
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='face_recognition',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='tests',
tests_require=test_requirements
)
| mit | -5,512,247,135,730,109,000 | 26.031746 | 74 | 0.603641 | false |
aiven/aiven-client | tests/test_argx.py | 1 | 1825 | # Copyright 2020, Aiven, https://aiven.io/
#
# This file is under the Apache License, Version 2.0.
# See the file `LICENSE` for details.
try:
from functools import cached_property
except ImportError:
cached_property = None
from aiven.client.argx import arg, CommandLineTool
class TestCLI(CommandLineTool):
@arg()
def xxx(self):
"""7"""
@arg()
def aaa(self):
"""1"""
@arg()
def ccc(self):
"""4"""
class SubCLI(CommandLineTool):
@arg()
def yyy(self):
"""8"""
@arg()
def bbb(self):
"""2"""
@arg()
def ddd(self):
"""5"""
class SubCLI2(CommandLineTool):
@arg()
def yyz(self):
"""9"""
@arg()
def bbc(self):
"""3"""
@arg()
def dde(self):
"""6"""
def test_extended_commands_remain_alphabetically_ordered():
cli = TestCLI("testcli")
cli.extend_commands(cli) # Force the CLI to have its full arg set at execution
sl2 = SubCLI2("subcli2")
sl = SubCLI("subcli")
cli.extend_commands(sl2)
cli.extend_commands(sl)
action_order = [item.dest for item in cli.subparsers._choices_actions] # pylint: disable=protected-access
assert action_order == ["aaa", "bbb", "bbc", "ccc", "ddd", "dde", "xxx", "yyy", "yyz"]
class DescriptorCLI(CommandLineTool):
@property
def raise1(self):
raise RuntimeError("evaluated raise1")
if cached_property is not None:
@cached_property
def raise2(self):
raise RuntimeError("evaluated raise2")
@arg("something")
def example_command(self):
"""Example command."""
def test_descriptors_are_not_eagerly_evaluated():
cli = DescriptorCLI("DescriptorCLI")
calls = []
cli.add_cmds(calls.append)
assert calls == [cli.example_command]
| apache-2.0 | -4,218,275,428,768,166,000 | 19.505618 | 110 | 0.592877 | false |
geotagx/geotagx-pybossa-archive | pybossa/stats.py | 1 | 14056 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from flask import current_app
from sqlalchemy.sql import text
from pybossa.core import db
from pybossa.cache import cache, memoize, ONE_DAY
from pybossa.model import TaskRun, Task
from pybossa.cache import FIVE_MINUTES, memoize
import string
import pygeoip
import operator
import datetime
import time
from datetime import timedelta
@memoize(timeout=ONE_DAY)
def get_task_runs(app_id):
"""Return all the Task Runs for a given app_id"""
task_runs = db.session.query(TaskRun).filter_by(app_id=app_id).all()
return task_runs
@memoize(timeout=ONE_DAY)
def get_tasks(app_id):
"""Return all the tasks for a given app_id"""
tasks = db.session.query(Task).filter_by(app_id=app_id).all()
return tasks
@memoize(timeout=ONE_DAY)
def get_avg_n_tasks(app_id):
"""Return the average number of answers expected per task,
and the number of tasks"""
sql = text('''SELECT COUNT(task.id) as n_tasks,
AVG(task.n_answers) AS "avg" FROM task
WHERE task.app_id=:app_id;''')
results = db.engine.execute(sql, app_id=app_id)
for row in results:
avg = float(row.avg)
total_n_tasks = row.n_tasks
return avg, total_n_tasks
@memoize(timeout=ONE_DAY)
def stats_users(app_id):
"""Return users's stats for a given app_id"""
users = {}
auth_users = []
anon_users = []
# Get Authenticated Users
sql = text('''SELECT task_run.user_id AS user_id,
COUNT(task_run.id) as n_tasks FROM task_run
WHERE task_run.user_id IS NOT NULL AND
task_run.user_ip IS NULL AND
task_run.app_id=:app_id
GROUP BY task_run.user_id ORDER BY n_tasks DESC
LIMIT 5;''')
results = db.engine.execute(sql, app_id=app_id)
for row in results:
auth_users.append([row.user_id, row.n_tasks])
sql = text('''SELECT count(distinct(task_run.user_id)) AS user_id FROM task_run
WHERE task_run.user_id IS NOT NULL AND
task_run.user_ip IS NULL AND
task_run.app_id=:app_id;''')
results = db.engine.execute(sql, app_id=app_id)
for row in results:
users['n_auth'] = row[0]
# Get all Anonymous Users
sql = text('''SELECT task_run.user_ip AS user_ip,
COUNT(task_run.id) as n_tasks FROM task_run
WHERE task_run.user_ip IS NOT NULL AND
task_run.user_id IS NULL AND
task_run.app_id=:app_id
GROUP BY task_run.user_ip ORDER BY n_tasks DESC;''')
results = db.engine.execute(sql, app_id=app_id)
for row in results:
anon_users.append([row.user_ip, row.n_tasks])
sql = text('''SELECT COUNT(DISTINCT(task_run.user_ip)) AS user_ip FROM task_run
WHERE task_run.user_ip IS NOT NULL AND
task_run.user_id IS NULL AND
task_run.app_id=:app_id;''')
results = db.engine.execute(sql, app_id=app_id)
for row in results:
users['n_anon'] = row[0]
return users, anon_users, auth_users
@memoize(timeout=ONE_DAY)
def stats_dates(app_id):
dates = {}
dates_anon = {}
dates_auth = {}
dates_n_tasks = {}
task_runs = get_task_runs(app_id)
avg, total_n_tasks = get_avg_n_tasks(app_id)
for tr in task_runs:
# Data for dates
date, hour = string.split(tr.finish_time, "T")
tr.finish_time = string.split(tr.finish_time, '.')[0]
hour = string.split(hour, ":")[0]
# Dates
if date in dates.keys():
dates[date] += 1
else:
dates[date] = 1
if date in dates_n_tasks.keys():
dates_n_tasks[date] = total_n_tasks * avg
else:
dates_n_tasks[date] = total_n_tasks * avg
if tr.user_id is None:
if date in dates_anon.keys():
dates_anon[date] += 1
else:
dates_anon[date] = 1
else:
if date in dates_auth.keys():
dates_auth[date] += 1
else:
dates_auth[date] = 1
return dates, dates_n_tasks, dates_anon, dates_auth
@memoize(timeout=ONE_DAY)
def stats_hours(app_id):
hours = {}
hours_anon = {}
hours_auth = {}
max_hours = 0
max_hours_anon = 0
max_hours_auth = 0
task_runs = get_task_runs(app_id)
# initialize hours keys
for i in range(0, 24):
hours[str(i).zfill(2)] = 0
hours_anon[str(i).zfill(2)] = 0
hours_auth[str(i).zfill(2)] = 0
for tr in task_runs:
# Hours
date, hour = string.split(tr.finish_time, "T")
tr.finish_time = string.split(tr.finish_time, '.')[0]
hour = string.split(hour, ":")[0]
if hour in hours.keys():
hours[hour] += 1
if (hours[hour] > max_hours):
max_hours = hours[hour]
if tr.user_id is None:
if hour in hours_anon.keys():
hours_anon[hour] += 1
if (hours_anon[hour] > max_hours_anon):
max_hours_anon = hours_anon[hour]
else:
if hour in hours_auth.keys():
hours_auth[hour] += 1
if (hours_auth[hour] > max_hours_auth):
max_hours_auth = hours_auth[hour]
return hours, hours_anon, hours_auth, max_hours, max_hours_anon, max_hours_auth
@memoize(timeout=ONE_DAY)
def stats_format_dates(app_id, dates, dates_n_tasks, dates_estimate,
dates_anon, dates_auth):
"""Format dates stats into a JSON format"""
dayNewStats = dict(label="Anon + Auth", values=[])
dayAvgAnswers = dict(label="Expected Answers", values=[])
dayEstimates = dict(label="Estimation", values=[])
dayTotalStats = dict(label="Total", disabled="True", values=[])
dayNewAnonStats = dict(label="Anonymous", values=[])
dayNewAuthStats = dict(label="Authenticated", values=[])
total = 0
for d in sorted(dates.keys()):
# JavaScript expects miliseconds since EPOCH
# New answers per day
dayNewStats['values'].append(
[int(time.mktime(time.strptime(d, "%Y-%m-%d")) * 1000), dates[d]])
dayAvgAnswers['values'].append(
[int(time.mktime(time.strptime(d, "%Y-%m-%d")) * 1000),
dates_n_tasks[d]])
# Total answers per day
total = total + dates[d]
dayTotalStats['values'].append(
[int(time.mktime(time.strptime(d, "%Y-%m-%d")) * 1000), total])
# Anonymous answers per day
if d in (dates_anon.keys()):
dayNewAnonStats['values'].append(
[int(time.mktime(time.strptime(d, "%Y-%m-%d")) * 1000),
dates_anon[d]])
else:
dayNewAnonStats['values'].append(
[int(time.mktime(time.strptime(d, "%Y-%m-%d")) * 1000), 0])
# Authenticated answers per day
if d in (dates_auth.keys()):
dayNewAuthStats['values'].append(
[int(time.mktime(time.strptime(d, "%Y-%m-%d")) * 1000),
dates_auth[d]])
else:
dayNewAuthStats['values'].append(
[int(time.mktime(time.strptime(d, "%Y-%m-%d")) * 1000), 0])
for d in sorted(dates_estimate.keys()):
dayEstimates['values'].append(
[int(time.mktime(time.strptime(d, "%Y-%m-%d")) * 1000),
dates_estimate[d]])
dayAvgAnswers['values'].append(
[int(time.mktime(time.strptime(d, "%Y-%m-%d")) * 1000),
dates_n_tasks.values()[0]])
return dayNewStats, dayNewAnonStats, dayNewAuthStats, \
dayTotalStats, dayAvgAnswers, dayEstimates
@memoize(timeout=ONE_DAY)
def stats_format_hours(app_id, hours, hours_anon, hours_auth,
max_hours, max_hours_anon, max_hours_auth):
"""Format hours stats into a JSON format"""
hourNewStats = dict(label="Anon + Auth", disabled="True", values=[], max=0)
hourNewAnonStats = dict(label="Anonymous", values=[], max=0)
hourNewAuthStats = dict(label="Authenticated", values=[], max=0)
hourNewStats['max'] = max_hours
hourNewAnonStats['max'] = max_hours_anon
hourNewAuthStats['max'] = max_hours_auth
for h in sorted(hours.keys()):
# New answers per hour
#hourNewStats['values'].append(dict(x=int(h), y=hours[h], size=hours[h]*10))
if (hours[h] != 0):
hourNewStats['values'].append([int(h), hours[h],
(hours[h] * 5) / max_hours])
else:
hourNewStats['values'].append([int(h), hours[h], 0])
# New Anonymous answers per hour
if h in hours_anon.keys():
#hourNewAnonStats['values'].append(dict(x=int(h), y=hours[h], size=hours_anon[h]*10))
if (hours_anon[h] != 0):
hourNewAnonStats['values'].append([int(h), hours_anon[h],
(hours_anon[h] * 5) / max_hours])
else:
hourNewAnonStats['values'].append([int(h), hours_anon[h], 0])
# New Authenticated answers per hour
if h in hours_auth.keys():
#hourNewAuthStats['values'].append(dict(x=int(h), y=hours[h], size=hours_auth[h]*10))
if (hours_auth[h] != 0):
hourNewAuthStats['values'].append([int(h), hours_auth[h],
(hours_auth[h] * 5) / max_hours])
else:
hourNewAuthStats['values'].append([int(h), hours_auth[h], 0])
return hourNewStats, hourNewAnonStats, hourNewAuthStats
@memoize(timeout=ONE_DAY)
def stats_format_users(app_id, users, anon_users, auth_users, geo=False):
"""Format User Stats into JSON"""
userStats = dict(label="User Statistics", values=[])
userAnonStats = dict(label="Anonymous Users", values=[], top5=[], locs=[])
userAuthStats = dict(label="Authenticated Users", values=[], top5=[])
userStats['values'].append(dict(label="Anonymous", value=[0, users['n_anon']]))
userStats['values'].append(dict(label="Authenticated", value=[0, users['n_auth']]))
for u in anon_users:
userAnonStats['values'].append(dict(label=u[0], value=[u[1]]))
for u in auth_users:
userAuthStats['values'].append(dict(label=u[0], value=[u[1]]))
# Get location for Anonymous users
top5_anon = []
top5_auth = []
loc_anon = []
# Check if the GeoLiteCity.dat exists
geolite = current_app.root_path + '/../dat/GeoLiteCity.dat'
if geo:
gic = pygeoip.GeoIP(geolite)
for u in anon_users:
if geo:
loc = gic.record_by_addr(u[0])
else:
loc = {}
if loc is None:
loc = {}
if (len(loc.keys()) == 0):
loc['latitude'] = 0
loc['longitude'] = 0
top5_anon.append(dict(ip=u[0], loc=loc, tasks=u[1]))
for u in anon_users:
if geo:
loc = gic.record_by_addr(u[0])
else:
loc = {}
if loc is None:
loc = {}
if (len(loc.keys()) == 0):
loc['latitude'] = 0
loc['longitude'] = 0
loc_anon.append(dict(ip=u[0], loc=loc, tasks=u[1]))
for u in auth_users:
sql = text('''SELECT name, fullname from "user" where id=:id;''')
results = db.engine.execute(sql, id=u[0])
for row in results:
fullname = row.fullname
name = row.name
top5_auth.append(dict(name=name, fullname=fullname, tasks=u[1]))
userAnonStats['top5'] = top5_anon[0:5]
userAnonStats['locs'] = loc_anon
userAuthStats['top5'] = top5_auth
return dict(users=userStats, anon=userAnonStats, auth=userAuthStats,
n_anon=users['n_anon'], n_auth=users['n_auth'])
@memoize(timeout=ONE_DAY)
def get_stats(app_id, geo=False):
"""Return the stats a given app"""
hours, hours_anon, hours_auth, max_hours, \
max_hours_anon, max_hours_auth = stats_hours(app_id)
users, anon_users, auth_users = stats_users(app_id)
dates, dates_n_tasks, dates_anon, dates_auth = stats_dates(app_id)
avg, total_n_tasks = get_avg_n_tasks(app_id)
sorted_answers = sorted(dates.iteritems(), key=operator.itemgetter(0))
if len(sorted_answers) > 0:
last_day = datetime.datetime.strptime(sorted_answers[-1][0], "%Y-%m-%d")
total_answers = sum(dates.values())
if len(dates) > 0:
avg_answers_per_day = total_answers / len(dates)
required_days_to_finish = ((avg * total_n_tasks) - total_answers) / avg_answers_per_day
pace = total_answers
dates_estimate = {}
for i in range(0, int(required_days_to_finish) + 2):
tmp = last_day + timedelta(days=(i))
tmp_str = tmp.date().strftime('%Y-%m-%d')
dates_estimate[tmp_str] = pace
pace = pace + avg_answers_per_day
dates_stats = stats_format_dates(app_id, dates, dates_n_tasks, dates_estimate,
dates_anon, dates_auth)
hours_stats = stats_format_hours(app_id, hours, hours_anon, hours_auth,
max_hours, max_hours_anon, max_hours_auth)
users_stats = stats_format_users(app_id, users, anon_users, auth_users, geo)
return dates_stats, hours_stats, users_stats
| agpl-3.0 | 3,382,761,236,077,615,600 | 34.494949 | 97 | 0.57712 | false |
rclmenezes/sqlalchemy | examples/vertical/dictlike.py | 1 | 7696 | """Mapping a vertical table as a dictionary.
This example illustrates accessing and modifying a "vertical" (or
"properties", or pivoted) table via a dict-like interface. These are tables
that store free-form object properties as rows instead of columns. For
example, instead of::
# A regular ("horizontal") table has columns for 'species' and 'size'
Table('animal', metadata,
Column('id', Integer, primary_key=True),
Column('species', Unicode),
Column('size', Unicode))
A vertical table models this as two tables: one table for the base or parent
entity, and another related table holding key/value pairs::
Table('animal', metadata,
Column('id', Integer, primary_key=True))
# The properties table will have one row for a 'species' value, and
# another row for the 'size' value.
Table('properties', metadata
Column('animal_id', Integer, ForeignKey('animal.id'),
primary_key=True),
Column('key', UnicodeText),
Column('value', UnicodeText))
Because the key/value pairs in a vertical scheme are not fixed in advance,
accessing them like a Python dict can be very convenient. The example below
can be used with many common vertical schemas as-is or with minor adaptations.
"""
class VerticalProperty(object):
"""A key/value pair.
This class models rows in the vertical table.
"""
def __init__(self, key, value):
self.key = key
self.value = value
def __repr__(self):
return '<%s %r=%r>' % (self.__class__.__name__, self.key, self.value)
class VerticalPropertyDictMixin(object):
"""Adds obj[key] access to a mapped class.
This is a mixin class. It can be inherited from directly, or included
with multiple inheritence.
Classes using this mixin must define two class properties::
_property_type:
The mapped type of the vertical key/value pair instances. Will be
invoked with two positional arugments: key, value
_property_mapping:
A string, the name of the Python attribute holding a dict-based
relationship of _property_type instances.
Using the VerticalProperty class above as an example,::
class MyObj(VerticalPropertyDictMixin):
_property_type = VerticalProperty
_property_mapping = 'props'
mapper(MyObj, sometable, properties={
'props': relationship(VerticalProperty,
collection_class=attribute_mapped_collection('key'))})
Dict-like access to MyObj is proxied through to the 'props' relationship::
myobj['key'] = 'value'
# ...is shorthand for:
myobj.props['key'] = VerticalProperty('key', 'value')
myobj['key'] = 'updated value']
# ...is shorthand for:
myobj.props['key'].value = 'updated value'
print myobj['key']
# ...is shorthand for:
print myobj.props['key'].value
"""
_property_type = VerticalProperty
_property_mapping = None
__map = property(lambda self: getattr(self, self._property_mapping))
def __getitem__(self, key):
return self.__map[key].value
def __setitem__(self, key, value):
property = self.__map.get(key, None)
if property is None:
self.__map[key] = self._property_type(key, value)
else:
property.value = value
def __delitem__(self, key):
del self.__map[key]
def __contains__(self, key):
return key in self.__map
# Implement other dict methods to taste. Here are some examples:
def keys(self):
return self.__map.keys()
def values(self):
return [prop.value for prop in self.__map.values()]
def items(self):
return [(key, prop.value) for key, prop in self.__map.items()]
def __iter__(self):
return iter(self.keys())
if __name__ == '__main__':
from sqlalchemy import (MetaData, Table, Column, Integer, Unicode,
ForeignKey, UnicodeText, and_, not_, create_engine)
from sqlalchemy.orm import mapper, relationship, Session
from sqlalchemy.orm.collections import attribute_mapped_collection
metadata = MetaData()
# Here we have named animals, and a collection of facts about them.
animals = Table('animal', metadata,
Column('id', Integer, primary_key=True),
Column('name', Unicode(100)))
facts = Table('facts', metadata,
Column('animal_id', Integer, ForeignKey('animal.id'),
primary_key=True),
Column('key', Unicode(64), primary_key=True),
Column('value', UnicodeText, default=None),)
class AnimalFact(VerticalProperty):
"""A fact about an animal."""
class Animal(VerticalPropertyDictMixin):
"""An animal.
Animal facts are available via the 'facts' property or by using
dict-like accessors on an Animal instance::
cat['color'] = 'calico'
# or, equivalently:
cat.facts['color'] = AnimalFact('color', 'calico')
"""
_property_type = AnimalFact
_property_mapping = 'facts'
def __init__(self, name):
self.name = name
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.name)
mapper(Animal, animals, properties={
'facts': relationship(
AnimalFact, backref='animal',
collection_class=attribute_mapped_collection('key')),
})
mapper(AnimalFact, facts)
engine = create_engine("sqlite://")
metadata.create_all(engine)
session = Session(bind=engine)
stoat = Animal(u'stoat')
stoat[u'color'] = u'reddish'
stoat[u'cuteness'] = u'somewhat'
# dict-like assignment transparently creates entries in the
# stoat.facts collection:
print stoat.facts[u'color']
session.add(stoat)
session.commit()
critter = session.query(Animal).filter(Animal.name == u'stoat').one()
print critter[u'color']
print critter[u'cuteness']
critter[u'cuteness'] = u'very'
print 'changing cuteness:'
engine.echo = True
session.commit()
engine.echo = False
marten = Animal(u'marten')
marten[u'color'] = u'brown'
marten[u'cuteness'] = u'somewhat'
session.add(marten)
shrew = Animal(u'shrew')
shrew[u'cuteness'] = u'somewhat'
shrew[u'poisonous-part'] = u'saliva'
session.add(shrew)
loris = Animal(u'slow loris')
loris[u'cuteness'] = u'fairly'
loris[u'poisonous-part'] = u'elbows'
session.add(loris)
session.commit()
q = (session.query(Animal).
filter(Animal.facts.any(
and_(AnimalFact.key == u'color',
AnimalFact.value == u'reddish'))))
print 'reddish animals', q.all()
# Save some typing by wrapping that up in a function:
with_characteristic = lambda key, value: and_(AnimalFact.key == key,
AnimalFact.value == value)
q = (session.query(Animal).
filter(Animal.facts.any(
with_characteristic(u'color', u'brown'))))
print 'brown animals', q.all()
q = (session.query(Animal).
filter(not_(Animal.facts.any(
with_characteristic(u'poisonous-part', u'elbows')))))
print 'animals without poisonous-part == elbows', q.all()
q = (session.query(Animal).
filter(Animal.facts.any(AnimalFact.value == u'somewhat')))
print 'any animal with any .value of "somewhat"', q.all()
# Facts can be queried as well.
q = (session.query(AnimalFact).
filter(with_characteristic(u'cuteness', u'very')))
print 'just the facts', q.all()
| mit | -6,652,649,003,940,161,000 | 30.284553 | 80 | 0.618243 | false |
OpenAcademy-OpenStack/nova-scheduler | nova/api/openstack/compute/plugins/v3/evacuate.py | 1 | 3721 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import evacuate
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova import utils
LOG = logging.getLogger(__name__)
ALIAS = "os-evacuate"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class EvacuateController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(EvacuateController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.host_api = compute.HostAPI()
@extensions.expected_errors((400, 404, 409))
@wsgi.action('evacuate')
@validation.schema(evacuate.evacuate)
def _evacuate(self, req, id, body):
"""
Permit admins to evacuate a server from a failed host
to a new one.
"""
context = req.environ["nova.context"]
authorize(context)
evacuate_body = body["evacuate"]
host = evacuate_body["host"]
on_shared_storage = strutils.bool_from_string(
evacuate_body["on_shared_storage"])
password = None
if 'admin_password' in evacuate_body:
# check that if requested to evacuate server on shared storage
# password not specified
if on_shared_storage:
msg = _("admin password can't be changed on existing disk")
raise exc.HTTPBadRequest(explanation=msg)
password = evacuate_body['admin_password']
elif not on_shared_storage:
password = utils.generate_password()
try:
self.host_api.service_get_by_compute_host(context, host)
except exception.NotFound:
msg = _("Compute host %s not found.") % host
raise exc.HTTPNotFound(explanation=msg)
try:
instance = self.compute_api.get(context, id)
self.compute_api.evacuate(context, instance, host,
on_shared_storage, password)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'evacuate')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceInUse as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
return {'admin_password': password}
class Evacuate(extensions.V3APIExtensionBase):
"""Enables server evacuation."""
name = "Evacuate"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
controller = EvacuateController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| apache-2.0 | 1,858,886,254,443,595,000 | 35.126214 | 79 | 0.658425 | false |
derwentx/WooGenerator | tests/conftest.py | 1 | 1338 | from context import TESTS_DATA_DIR, get_testdata, woogenerator
import unittest
# import argparse
import pytest
import logging
from woogenerator.namespace.core import (
MatchNamespace, ParserNamespace, SettingsNamespaceProto, UpdateNamespace
)
from woogenerator.conf.parser import ArgumentParserCommon, ArgumentParserProd
from woogenerator.utils import Registrar, TimeUtils
def pytest_addoption(parser):
# parser.addoption("--enable-debug", action="store_true", default=False, help="debug tests")
parser.addoption("--run-slow", action="store_true", default=False, help="run slow tests")
parser.addoption("--run-local", action="store_true", default=False, help="run local tests")
def pytest_collection_modifyitems(config, items):
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
skip_local = pytest.mark.skip(reason="need --runlocal option to tun")
if not config.getoption("--run-slow"):
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
if not config.getoption('--run-local'):
for item in items:
if "local" in item.keywords:
item.add_marker(skip_local)
@pytest.fixture(scope="class")
def debug(request):
response = request.config.getoption("--debug")
request.cls.debug = response
| gpl-2.0 | 6,630,934,515,018,642,000 | 39.545455 | 96 | 0.710015 | false |
bmihelac/django-cruds | tests/test_integration.py | 1 | 1639 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test.testcases import TestCase
from tests.testapp.models import (
Author,
)
class TestIntegration(TestCase):
def setUp(self):
self.author = Author.objects.create(name='Foo')
def test_list(self):
response = self.client.get('/testapp/author/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Foo')
def test_create(self):
response = self.client.get('/testapp/author/new/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/testapp/author/new/', {
'name': 'Bar',
})
instance = Author.objects.filter(name='Bar').get()
self.assertRedirects(response, '/testapp/author/%s/' % instance.pk)
def test_detail(self):
response = self.client.get('/testapp/author/%s/' %
self.author.pk)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Foo')
def test_update(self):
url = '/testapp/author/%s/edit/' % self.author.pk
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, {
'name': 'Fooz',
})
self.assertRedirects(response, '/testapp/author/%s/' % self.author.pk)
def test_delete(self):
url = '/testapp/author/%s/remove/' % self.author.pk
response = self.client.post(url)
self.assertEqual(Author.objects.count(), 0)
self.assertRedirects(response, '/testapp/author/')
| bsd-3-clause | 394,007,786,364,623,360 | 31.137255 | 78 | 0.610738 | false |
googleads/google-ads-python | google/ads/googleads/v8/resources/types/detail_placement_view.py | 1 | 2839 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.enums.types import (
placement_type as gage_placement_type,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v8.resources",
marshal="google.ads.googleads.v8",
manifest={"DetailPlacementView",},
)
class DetailPlacementView(proto.Message):
r"""A view with metrics aggregated by ad group and URL or YouTube
video.
Attributes:
resource_name (str):
Output only. The resource name of the detail placement view.
Detail placement view resource names have the form:
``customers/{customer_id}/detailPlacementViews/{ad_group_id}~{base64_placement}``
placement (str):
Output only. The automatic placement string
at detail level, e. g. website URL, mobile
application ID, or a YouTube video ID.
display_name (str):
Output only. The display name is URL name for
websites, YouTube video name for YouTube videos,
and translated mobile app name for mobile apps.
group_placement_target_url (str):
Output only. URL of the group placement, e.g.
domain, link to the mobile application in app
store, or a YouTube channel URL.
target_url (str):
Output only. URL of the placement, e.g.
website, link to the mobile application in app
store, or a YouTube video URL.
placement_type (google.ads.googleads.v8.enums.types.PlacementTypeEnum.PlacementType):
Output only. Type of the placement, e.g.
Website, YouTube Video, and Mobile Application.
"""
resource_name = proto.Field(proto.STRING, number=1,)
placement = proto.Field(proto.STRING, number=7, optional=True,)
display_name = proto.Field(proto.STRING, number=8, optional=True,)
group_placement_target_url = proto.Field(
proto.STRING, number=9, optional=True,
)
target_url = proto.Field(proto.STRING, number=10, optional=True,)
placement_type = proto.Field(
proto.ENUM,
number=6,
enum=gage_placement_type.PlacementTypeEnum.PlacementType,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -1,632,002,444,791,074,800 | 36.853333 | 93 | 0.66925 | false |
heltonbiker/MapComplete | PyQt/FeatureDemos/helloQtDrawing.py | 1 | 9092 | #!/usr/bin/env python
# __author__ = 'helton'
import sip
sip.setapi('QVariant', 2)
from math import cos, pi, sin
from PyQt4 import QtCore, QtGui
class RenderArea(QtGui.QWidget):
def __init__(self, path, parent=None):
super(RenderArea, self).__init__(parent)
self.path = path
self.penWidth = 1
self.rotationAngle = 0
self.setBackgroundRole(QtGui.QPalette.Base)
def minimumSizeHint(self):
return QtCore.QSize(50, 50)
def sizeHint(self):
return QtCore.QSize(100, 100)
def setFillRule(self, rule):
self.path.setFillRule(rule)
self.update()
def setFillGradient(self, color1, color2):
self.fillColor1 = color1
self.fillColor2 = color2
self.update()
def setPenWidth(self, width):
self.penWidth = width
self.update()
def setPenColor(self, color):
self.penColor = color
self.update()
def setRotationAngle(self, degrees):
self.rotationAngle = degrees
self.update()
def paintEvent(self, event):
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.scale(self.width() / 100.0, self.height() / 100.0)
painter.translate(50.0, 50.0)
painter.rotate(-self.rotationAngle)
painter.translate(-50.0, -50.0)
painter.setPen(QtGui.QPen(self.penColor, self.penWidth,
QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))
gradient = QtGui.QLinearGradient(0, 0, 0, 100)
gradient.setColorAt(0.0, self.fillColor1)
gradient.setColorAt(1.0, self.fillColor2)
painter.setBrush(QtGui.QBrush(gradient))
painter.drawPath(self.path)
class Window(QtGui.QWidget):
NumRenderAreas = 9
def __init__(self):
super(Window, self).__init__()
rectPath = QtGui.QPainterPath()
rectPath.moveTo(20.0, 30.0)
rectPath.lineTo(80.0, 30.0)
rectPath.lineTo(80.0, 70.0)
rectPath.lineTo(20.0, 70.0)
rectPath.closeSubpath()
roundRectPath = QtGui.QPainterPath()
roundRectPath.moveTo(80.0, 35.0)
roundRectPath.arcTo(70.0, 30.0, 10.0, 10.0, 0.0, 90.0)
roundRectPath.lineTo(25.0, 30.0)
roundRectPath.arcTo(20.0, 30.0, 10.0, 10.0, 90.0, 90.0)
roundRectPath.lineTo(20.0, 65.0)
roundRectPath.arcTo(20.0, 60.0, 10.0, 10.0, 180.0, 90.0)
roundRectPath.lineTo(75.0, 70.0)
roundRectPath.arcTo(70.0, 60.0, 10.0, 10.0, 270.0, 90.0)
roundRectPath.closeSubpath()
ellipsePath = QtGui.QPainterPath()
ellipsePath.moveTo(80.0, 50.0)
ellipsePath.arcTo(20.0, 30.0, 60.0, 40.0, 0.0, 360.0)
piePath = QtGui.QPainterPath()
piePath.moveTo(50.0, 50.0)
piePath.lineTo(65.0, 32.6795)
piePath.arcTo(20.0, 30.0, 60.0, 40.0, 60.0, 240.0)
piePath.closeSubpath()
polygonPath = QtGui.QPainterPath()
polygonPath.moveTo(10.0, 80.0)
polygonPath.lineTo(20.0, 10.0)
polygonPath.lineTo(80.0, 30.0)
polygonPath.lineTo(90.0, 70.0)
polygonPath.closeSubpath()
groupPath = QtGui.QPainterPath()
groupPath.moveTo(60.0, 40.0)
groupPath.arcTo(20.0, 20.0, 40.0, 40.0, 0.0, 360.0)
groupPath.moveTo(40.0, 40.0)
groupPath.lineTo(40.0, 80.0)
groupPath.lineTo(80.0, 80.0)
groupPath.lineTo(80.0, 40.0)
groupPath.closeSubpath()
textPath = QtGui.QPainterPath()
timesFont = QtGui.QFont("Times", 50)
timesFont.setStyleStrategy(QtGui.QFont.ForceOutline)
textPath.addText(10, 70, timesFont, "Qt")
bezierPath = QtGui.QPainterPath()
bezierPath.moveTo(20, 30)
bezierPath.cubicTo(80, 0, 50, 50, 80, 80)
starPath = QtGui.QPainterPath()
starPath.moveTo(90, 50)
for i in range(1, 5):
starPath.lineTo(50 + 40 * cos(0.8 * i * pi),
50 + 40 * sin(0.8 * i * pi))
starPath.closeSubpath()
self.renderAreas = [RenderArea(rectPath), RenderArea(roundRectPath),
RenderArea(ellipsePath), RenderArea(piePath),
RenderArea(polygonPath), RenderArea(groupPath),
RenderArea(textPath), RenderArea(bezierPath),
RenderArea(starPath)]
assert len(self.renderAreas) == 9
self.fillRuleComboBox = QtGui.QComboBox()
self.fillRuleComboBox.addItem("Odd Even", QtCore.Qt.OddEvenFill)
self.fillRuleComboBox.addItem("Winding", QtCore.Qt.WindingFill)
fillRuleLabel = QtGui.QLabel("Fill &Rule:")
fillRuleLabel.setBuddy(self.fillRuleComboBox)
self.fillColor1ComboBox = QtGui.QComboBox()
self.populateWithColors(self.fillColor1ComboBox)
self.fillColor1ComboBox.setCurrentIndex(
self.fillColor1ComboBox.findText("mediumslateblue"))
self.fillColor2ComboBox = QtGui.QComboBox()
self.populateWithColors(self.fillColor2ComboBox)
self.fillColor2ComboBox.setCurrentIndex(
self.fillColor2ComboBox.findText("cornsilk"))
fillGradientLabel = QtGui.QLabel("&Fill Gradient:")
fillGradientLabel.setBuddy(self.fillColor1ComboBox)
fillToLabel = QtGui.QLabel("to")
fillToLabel.setSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed)
self.penWidthSpinBox = QtGui.QSpinBox()
self.penWidthSpinBox.setRange(0, 20)
penWidthLabel = QtGui.QLabel("&Pen Width:")
penWidthLabel.setBuddy(self.penWidthSpinBox)
self.penColorComboBox = QtGui.QComboBox()
self.populateWithColors(self.penColorComboBox)
self.penColorComboBox.setCurrentIndex(
self.penColorComboBox.findText('darkslateblue'))
penColorLabel = QtGui.QLabel("Pen &Color:")
penColorLabel.setBuddy(self.penColorComboBox)
self.rotationAngleSpinBox = QtGui.QSpinBox()
self.rotationAngleSpinBox.setRange(0, 359)
self.rotationAngleSpinBox.setWrapping(True)
self.rotationAngleSpinBox.setSuffix('\xB0')
rotationAngleLabel = QtGui.QLabel("&Rotation Angle:")
rotationAngleLabel.setBuddy(self.rotationAngleSpinBox)
self.fillRuleComboBox.activated.connect(self.fillRuleChanged)
self.fillColor1ComboBox.activated.connect(self.fillGradientChanged)
self.fillColor2ComboBox.activated.connect(self.fillGradientChanged)
self.penColorComboBox.activated.connect(self.penColorChanged)
for i in range(Window.NumRenderAreas):
self.penWidthSpinBox.valueChanged.connect(self.renderAreas[i].setPenWidth)
self.rotationAngleSpinBox.valueChanged.connect(self.renderAreas[i].setRotationAngle)
topLayout = QtGui.QGridLayout()
for i in range(Window.NumRenderAreas):
topLayout.addWidget(self.renderAreas[i], i / 3, i % 3)
mainLayout = QtGui.QGridLayout()
mainLayout.addLayout(topLayout, 0, 0, 1, 4)
mainLayout.addWidget(fillRuleLabel, 1, 0)
mainLayout.addWidget(self.fillRuleComboBox, 1, 1, 1, 3)
mainLayout.addWidget(fillGradientLabel, 2, 0)
mainLayout.addWidget(self.fillColor1ComboBox, 2, 1)
mainLayout.addWidget(fillToLabel, 2, 2)
mainLayout.addWidget(self.fillColor2ComboBox, 2, 3)
mainLayout.addWidget(penWidthLabel, 3, 0)
mainLayout.addWidget(self.penWidthSpinBox, 3, 1, 1, 3)
mainLayout.addWidget(penColorLabel, 4, 0)
mainLayout.addWidget(self.penColorComboBox, 4, 1, 1, 3)
mainLayout.addWidget(rotationAngleLabel, 5, 0)
mainLayout.addWidget(self.rotationAngleSpinBox, 5, 1, 1, 3)
self.setLayout(mainLayout)
self.fillRuleChanged()
self.fillGradientChanged()
self.penColorChanged()
self.penWidthSpinBox.setValue(2)
self.setWindowTitle("Painter Paths")
def fillRuleChanged(self):
rule = QtCore.Qt.FillRule(self.currentItemData(self.fillRuleComboBox))
for i in range(Window.NumRenderAreas):
self.renderAreas[i].setFillRule(rule)
def fillGradientChanged(self):
color1 = QtGui.QColor(self.currentItemData(self.fillColor1ComboBox))
color2 = QtGui.QColor(self.currentItemData(self.fillColor2ComboBox))
for i in range(Window.NumRenderAreas):
self.renderAreas[i].setFillGradient(color1, color2)
def penColorChanged(self):
color = QtGui.QColor(self.currentItemData(self.penColorComboBox))
for i in range(Window.NumRenderAreas):
self.renderAreas[i].setPenColor(color)
def populateWithColors(self, comboBox):
colorNames = QtGui.QColor.colorNames()
for name in colorNames:
comboBox.addItem(name, name)
def currentItemData(self, comboBox):
return comboBox.itemData(comboBox.currentIndex())
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
| mit | -1,600,070,330,208,654,300 | 34.24031 | 96 | 0.648152 | false |
openstack/python-mistralclient | mistralclient/tests/unit/v2/test_cli_cron_triggers.py | 1 | 6080 | # Copyright 2014 Mirantis, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from mistralclient.api.v2 import cron_triggers
from mistralclient.commands.v2 import cron_triggers as cron_triggers_cmd
from mistralclient.tests.unit import base
TRIGGER_DICT = {
'name': 'my_trigger',
'workflow_name': 'flow1',
'workflow_input': {},
'workflow_params': {},
'pattern': '* * * * *',
'next_execution_time': '4242-12-20 13:37',
'remaining_executions': 5,
'created_at': '1',
'updated_at': '1'
}
TRIGGER = cron_triggers.CronTrigger(mock, TRIGGER_DICT)
class TestCLITriggersV2(base.BaseCommandTest):
@mock.patch('mistralclient.commands.v2.cron_triggers.Create.'
'_convert_time_string_to_utc')
@mock.patch('argparse.open', create=True)
def test_create(self, mock_open, mock_convert):
self.client.cron_triggers.create.return_value = TRIGGER
mock_open.return_value = mock.MagicMock(spec=open)
result = self.call(
cron_triggers_cmd.Create,
app_args=['my_trigger', 'flow1', '--pattern', '* * * * *',
'--params', '{}', '--count', '5', '--first-time',
'4242-12-20 13:37', '--utc']
)
mock_convert.assert_not_called()
self.assertEqual(
(
'my_trigger', 'flow1', {}, '* * * * *',
'4242-12-20 13:37', 5, '1', '1'
),
result[1]
)
@mock.patch('mistralclient.commands.v2.cron_triggers.Create.'
'_convert_time_string_to_utc')
@mock.patch('argparse.open', create=True)
def test_create_no_utc(self, mock_open, mock_convert):
self.client.cron_triggers.create.return_value = TRIGGER
mock_open.return_value = mock.MagicMock(spec=open)
mock_convert.return_value = '4242-12-20 18:37'
result = self.call(
cron_triggers_cmd.Create,
app_args=['my_trigger', 'flow1', '--pattern', '* * * * *',
'--params', '{}', '--count', '5', '--first-time',
'4242-12-20 13:37']
)
mock_convert.assert_called_once_with('4242-12-20 13:37')
self.client.cron_triggers.create.assert_called_once_with(
'my_trigger', 'flow1', {}, {}, '* * * * *', '4242-12-20 18:37', 5)
self.assertEqual(
(
'my_trigger', 'flow1', {}, '* * * * *',
'4242-12-20 13:37', 5, '1', '1'
),
result[1]
)
@mock.patch('mistralclient.commands.v2.cron_triggers.time')
def test_convert_time_string_to_utc_from_utc(self, mock_time):
cmd = cron_triggers_cmd.Create(self.app, None)
mock_time.daylight = 0
mock_time.altzone = 0
mock_time.timezone = 0
mock_localtime = mock.Mock()
mock_localtime.tm_isdst = 0
mock_time.localtime.return_value = mock_localtime
utc_value = cmd._convert_time_string_to_utc('4242-12-20 13:37')
expected_time = '4242-12-20 13:37'
self.assertEqual(expected_time, utc_value)
@mock.patch('mistralclient.commands.v2.cron_triggers.time')
def test_convert_time_string_to_utc_from_dst(self, mock_time):
cmd = cron_triggers_cmd.Create(self.app, None)
mock_time.daylight = 1
mock_time.altzone = (4 * 60 * 60)
mock_time.timezone = (5 * 60 * 60)
mock_localtime = mock.Mock()
mock_localtime.tm_isdst = 1
mock_time.localtime.return_value = mock_localtime
utc_value = cmd._convert_time_string_to_utc('4242-12-20 13:37')
expected_time = '4242-12-20 17:37'
self.assertEqual(expected_time, utc_value)
@mock.patch('mistralclient.commands.v2.cron_triggers.time')
def test_convert_time_string_to_utc_no_dst(self, mock_time):
cmd = cron_triggers_cmd.Create(self.app, None)
mock_time.daylight = 1
mock_time.altzone = (4 * 60 * 60)
mock_time.timezone = (5 * 60 * 60)
mock_localtime = mock.Mock()
mock_localtime.tm_isdst = 0
mock_time.localtime.return_value = mock_localtime
utc_value = cmd._convert_time_string_to_utc('4242-12-20 13:37')
expected_time = '4242-12-20 18:37'
self.assertEqual(expected_time, utc_value)
def test_list(self):
self.client.cron_triggers.list.return_value = [TRIGGER]
result = self.call(cron_triggers_cmd.List)
self.assertEqual(
[(
'my_trigger', 'flow1', {}, '* * * * *',
'4242-12-20 13:37', 5, '1', '1'
)],
result[1]
)
def test_get(self):
self.client.cron_triggers.get.return_value = TRIGGER
result = self.call(cron_triggers_cmd.Get, app_args=['name'])
self.assertEqual(
(
'my_trigger', 'flow1', {}, '* * * * *',
'4242-12-20 13:37', 5, '1', '1'
),
result[1]
)
def test_delete(self):
self.call(cron_triggers_cmd.Delete, app_args=['name'])
self.client.cron_triggers.delete.assert_called_once_with('name')
def test_delete_with_multi_names(self):
self.call(cron_triggers_cmd.Delete, app_args=['name1', 'name2'])
self.assertEqual(2, self.client.cron_triggers.delete.call_count)
self.assertEqual(
[mock.call('name1'), mock.call('name2')],
self.client.cron_triggers.delete.call_args_list
)
| apache-2.0 | -6,033,300,257,521,430,000 | 33.157303 | 78 | 0.577138 | false |
Irrialite/YouTune | youtune/api/resources.py | 1 | 18770 | from datetime import date, datetime
import hashlib, inspect
from django.db.models import Q
from django.contrib.auth import authenticate, login, logout, models as auth_models
from django.contrib.auth.hashers import make_password
from django.conf.urls import url
from django.utils import timezone
from tastypie import resources, fields
from tastypie.authentication import Authentication
from tastypie.authorization import Authorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.serializers import Serializer
from tastypie.utils import trailing_slash
from tastypie.http import HttpUnauthorized, HttpForbidden
from youtune.account import models, forms
from youtune.api.helpers import FieldsValidation
from youtune.api.authorization import UserObjectsOnlyAuthorization
from youtune.fileupload import models as file_models
class CommentDateSerializer(Serializer):
def format_datetime(self, data):
if self.datetime_formatting == 'rfc-2822':
return super(CommentDateSerializer, self).format_datetime(data)
return data.isoformat()
class UserProfileResource(resources.ModelResource):
id = fields.IntegerField(attribute="id", null=True)
class Meta:
queryset = models.UserProfile.objects.all()
resource_name = 'userprofile'
# TODO:
# Add custom Authorization (important)
authentication = Authentication()
authorization = Authorization()
# excludes = ['email', 'is_staff', 'is_superuser']
filtering = {
'username': ALL
}
def dehydrate_password(self, bundle):
return ''
def dehydrate(self, bundle):
if bundle.request.user.pk == bundle.obj.pk:
bundle.data['email'] = bundle.obj.email
bundle.data['is_staff'] = bundle.obj.is_staff
bundle.data['is_superuser'] = bundle.obj.is_superuser
model = bundle.obj.channel
ret = {}
for f in sorted(model._meta.fields + model._meta.many_to_many):
ret[f.name] = getattr(model, f.name)
bundle.data['channel'] = ret
return bundle
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/login%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('login'), name="api_login"),
url(r'^(?P<resource_name>%s)/logout%s$' %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('logout'), name='api_logout'),
url(r'^(?P<resource_name>%s)/loggedin%s$' %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('loggedin'), name='api_loggedin'),
url(r'^(?P<resource_name>%s)/checkfordupe%s$' %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('checkfordupe'), name='api_checkfordupe'),
url(r'^(?P<resource_name>%s)/update%s$' %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('update'), name='api_update'),
url(r'^(?P<resource_name>%s)/count%s$' %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('count'), name='api_count'),
]
def login(self, request, **kwargs):
self.method_check(request, allowed=['post'])
data = self.deserialize(request, request.raw_post_data,
format=request.META.get('CONTENT_TYPE', 'application/json'))
username = data.get('username', '')
password = data.get('password', '')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return self.create_response(request, {
'success': True
})
else:
return self.create_response(request, {
'success': False,
'reason': 'disabled',
}, HttpForbidden)
else:
return self.create_response(request, {
'success': False,
'reason': 'incorrect',
}, HttpUnauthorized)
def logout(self, request, **kwargs):
self.method_check(request, allowed=['get'])
if request.user and request.user.is_authenticated():
logout(request)
return self.create_response(request, {'success': True})
else:
return self.create_response(request, {'success': False}, HttpUnauthorized)
def hydrate(self, bundle):
# About to do some ninja skills
if bundle.request.method == 'PATCH':
bundle.data['password'] = models.UserProfile.objects.get(pk=int(bundle.data['id'])).password
else:
bundle.data['password'] = make_password(bundle.data['password'])
if bundle.data['birthdate']:
birthdate = bundle.data['birthdate'].split("-")
birthdate = date(year=int(birthdate[0]), month=int(
birthdate[1]), day=int(birthdate[2]))
bundle.data['birthdate'] = birthdate
bundle.data['avatar'] = "http://www.gravatar.com/avatar/" + hashlib.md5(bundle.data['email'].lower()).hexdigest();
return bundle
def loggedin(self, request, **kwargs):
self.method_check(request, allowed=['get'])
if request.user.is_authenticated():
return self.create_response(request, {
'success': True,
'id': request.user.id,
})
else:
return self.create_response(request, {
'success': False
})
def checkfordupe(self, request, **kwargs):
self.method_check(request, allowed=['post'])
data = self.deserialize(request, request.raw_post_data,
format=request.META.get('CONTENT_TYPE', 'application/json'))
username = data.get('username', '')
user = None;
try:
user = models.UserProfile.objects.get(username__iexact=username)
except models.UserProfile.DoesNotExist:
return self.create_response(request, {
'success': True,
})
else:
return self.create_response(request, {
'success': False,
'id': user.id,
})
def update(self, request, **kwargs):
self.method_check(request, allowed=['post'])
data = self.deserialize(request, request.raw_post_data,
format=request.META.get('CONTENT_TYPE', 'application/json'))
player_volume = data.get('player_volume', '')
player_autoplay = data.get('player_autoplay', '')
player_repeat = data.get('player_repeat', '')
player_format = data.get('player_format', '')
if request.user:
if request.user.is_authenticated():
user = request.user
user.player_volume = player_volume
user.player_autoplay = player_autoplay
user.player_repeat = player_repeat
user.player_format = player_format
user.save(update_fields=['player_volume',
'player_autoplay',
'player_repeat',
'player_format'])
return self.create_response(request, {
'success': True
})
else:
return self.create_response(request, {
'success': False,
}, HttpForbidden)
else:
return self.create_response(request, {
'success': False,
'reason': 'incorrect',
}, HttpUnauthorized)
def count(self, request, **kwargs):
self.method_check(request, allowed=['get'])
count = models.UserProfile.objects.count()
return self.create_response(request, {
'count': count,
})
def save(self, bundle, skip_errors=False):
bundle = super(UserProfileResource, self).save(bundle, skip_errors)
desc = bundle.obj.username + "'s channel description."
channel = models.Channel(description=desc, owner=bundle.obj)
channel.save()
return bundle
class FileResource(resources.ModelResource):
objects_returned = 0
owner = fields.ForeignKey(UserProfileResource, 'owner')
class Meta:
allowed_methods = ['get']
queryset = file_models.File.objects.all()
resource_name = 'music'
filtering = {
'base64id': ALL,
'upload_date': ALL,
'owner': ALL_WITH_RELATIONS,
'views': ALL,
'lastview_date': ALL,
'query': ['icontains',],
}
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/vote%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('vote'), name="api_vote"),
]
# to sort by descending insert '-' (i.e. '-title')
def apply_sorting(self, objects, options=None):
if options:
if 'sortby' in options:
return objects.order_by(options['sortby'])
return super(FileResource, self).apply_sorting(objects, options)
def vote(self, request, **kwargs):
self.method_check(request, allowed=['post'])
data = self.deserialize(request, request.raw_post_data,
format=request.META.get('CONTENT_TYPE', 'application/json'))
vote = data.get('vote', '')
base64id = data.get('base64id', '')
userid = data.get('userid', '')
track = None
try:
track = file_models.File.objects.get(base64id__exact=base64id)
user = models.UserProfile.objects.get(pk=userid)
exists = False
if user in track.votes.all():
exists = True
if vote == "like":
track.likes.add(user)
if exists:
track.dislikes.remove(user)
else:
track.dislikes.add(user)
if exists:
track.likes.remove(user)
if not exists:
track.votes.add(user)
except file_models.File.DoesNotExist, models.UserProfile.DoesNotExist:
return self.create_response(request, {
'success': False,
})
else:
return self.create_response(request, {
'success': True,
'dislikes': track.votes.count() - track.likes.count(),
'likes': track.likes.count(),
})
def build_filters(self, filters=None):
if filters is None:
filters = {}
orm_filters = super(FileResource, self).build_filters(filters)
if('query' in filters):
query = filters['query']
query = query.split(' ')
qset = Q()
for q in query:
if len(q.strip()) > 1:
qset &= (
Q(title__icontains=q) |
Q(tags__icontains=q) |
Q(artist__icontains=q)
)
orm_filters.update({'custom': qset})
return orm_filters
def apply_filters(self, request, applicable_filters):
if 'custom' in applicable_filters:
custom = applicable_filters.pop('custom')
else:
custom = None
semi_filtered = super(FileResource, self).apply_filters(request, applicable_filters)
return semi_filtered.filter(custom) if custom else semi_filtered
def dehydrate(self, bundle):
track = file_models.File.objects.get(pk=bundle.data['id'])
bundle.data['likes'] = track.likes.count()
bundle.data['dislikes'] = track.dislikes.count()
if self.objects_returned == 1:
bundle.data['owner'] = bundle.obj.owner.username
bundle.data['avatar'] = bundle.obj.owner.avatar + "?s=64"
if bundle.request.user and bundle.request.user.is_authenticated():
if bundle.request.user in track.likes.all():
bundle.data['voted'] = "like"
elif bundle.request.user in track.dislikes.all():
bundle.data['voted'] = "dislike"
else:
bundle.data['voted'] = "none"
else:
bundle.data['voted'] = "disallowed"
return bundle
def obj_get_list(self, bundle, **kwargs):
"""
A ORM-specific implementation of ``obj_get_list``.
Takes an optional ``request`` object, whose ``GET`` dictionary can be
used to narrow the query.
"""
filters = {}
if hasattr(bundle.request, 'GET'):
# Grab a mutable copy.
filters = bundle.request.GET.copy()
# Update with the provided kwargs.
filters.update(kwargs)
channel = False
if 'owner' in filters:
channel = True
applicable_filters = self.build_filters(filters=filters)
try:
objects = self.apply_filters(bundle.request, applicable_filters)
self.objects_returned = len(objects)
if len(objects) == 1 and applicable_filters and not channel:
obj = objects[0]
obj.views = obj.views + 1
obj.lastview_date = timezone.now()
obj.save(update_fields=['views', 'lastview_date'])
return self.authorized_read_list(objects, bundle)
except ValueError:
raise BadRequest("Invalid resource lookup data provided (mismatched type).")
class ChannelResource(resources.ModelResource):
class Meta:
allowed_methods = []
queryset = models.Channel.objects.all()
resource_name = 'channel'
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/update%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('update'), name="api_update"),
]
def update(self, request, **kwargs):
self.method_check(request, allowed=['post'])
data = self.deserialize(request, request.raw_post_data,
format=request.META.get('CONTENT_TYPE', 'application/json'))
desc = data.get('description', '')
if request.user:
if request.user.is_authenticated():
channel = request.user.channel;
channel.description = desc;
channel.save(update_fields=['description'])
return self.create_response(request, {
'success': True
})
else:
return self.create_response(request, {
'success': False,
}, HttpForbidden)
else:
return self.create_response(request, {
'success': False,
'reason': 'incorrect',
}, HttpUnauthorized)
class CommentResource(resources.ModelResource):
class Meta:
allowed_methods = ['get']
queryset = file_models.Comment.objects.all()
resource_name = 'comment'
serializer = CommentDateSerializer()
filtering = {
'base64id': ALL,
}
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/post%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('post'), name="api_post"),
]
def post(self, request, **kwargs):
self.method_check(request, allowed=['post'])
data = self.deserialize(request, request.raw_post_data,
format=request.META.get('CONTENT_TYPE', 'application/json'))
body = data.get('commenttext', '')
fileid = data.get('fileid', '')
if request.user:
if request.user.is_authenticated():
try:
file = file_models.File.objects.get(pk=fileid)
except file_models.File.DoesNotExist:
return self.create_response(request, {
'success': False,
}, HttpForbidden)
else:
comment = file_models.Comment(owner=request.user, body=body, file=file)
comment.save()
file.comments.add(comment)
return self.create_response(request, {
'success': True,
'date': comment.post_date,
})
else:
return self.create_response(request, {
'success': False,
}, HttpForbidden)
else:
return self.create_response(request, {
'success': False,
'reason': 'incorrect',
}, HttpUnauthorized)
def apply_sorting(self, objects, options=None):
if options:
if 'sortby' in options:
return objects.order_by(options['sortby'])
return super(CommentResource, self).apply_sorting(objects, options)
def dehydrate(self, bundle):
bundle.data['owner'] = bundle.obj.owner.username
bundle.data['avatar'] = bundle.obj.owner.avatar + "?s=64"
return bundle
class UserValidation(FieldsValidation):
def __init__(self):
super(
UserValidation, self).__init__(required=['username', 'first_name', 'last_name'],
validated=['username'],
required_post=[
'email', 'password'],
validated_post=['password'],
)
@staticmethod
def password_is_valid(password, bundle):
if len(password) < 6:
return False, 'Password is too short.'
return True, ""
@staticmethod
def username_is_valid(username, bundle):
try:
user = User.objects.get(username=username)
if user is not None and str(user.id) != str(bundle.data.get('id', 0)):
return False, "The username is already taken."
except User.DoesNotExist:
return True, ""
return True, ""
| bsd-3-clause | -6,080,168,689,550,056,000 | 37.150407 | 122 | 0.535962 | false |
ver228/tierpsy-tracker | tierpsy/debugging/check_default_attrs.py | 1 | 3346 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 30 16:51:07 2019
@author: lferiani
"""
import glob
import os
import tables
from tierpsy.helper.misc import RESERVED_EXT
from tierpsy.helper.params import set_unit_conversions, read_unit_conversions
from tierpsy import DFLT_PARAMS_PATH, DFLT_PARAMS_FILES
from tierpsy.helper.params import TrackerParams
#script to correct a previous bug in how the expected_fps, microns_per_pixel are saved.
# actually let's first check if files have gone bad!
#%%
params = TrackerParams(os.path.join(DFLT_PARAMS_PATH, '_AEX_RIG.json'))
expected_fps = params.p_dict['expected_fps']
microns_per_pixel = params.p_dict['microns_per_pixel']
#%%
#main_dir = '/Volumes/behavgenom_archive$/Adam/screening'
#fnames = glob.glob(os.path.join(main_dir, '**', '*.hdf5'), recursive=True)
#dname = '/Volumes/behavgenom_archive$/Ida/test_3/**/*.hdf5'
#dname = '/Volumes/behavgenom_archive$/Ida/LoopBio_rig/180222_blue_light/3/**/*.hdf5'
#dname = '/Volumes/behavgenom$/Bertie/singleplatequiescence/**/*.hdf5'
#fnames = glob.glob(dname, recursive=True)
#
#masked_files = [x for x in fnames if not any(x.endswith(ext) for ext in RESERVED_EXT)]
#skeletons_files = [x for x in fnames if x.endswith('_skeletons.hdf5')]
mv_dname = '/Volumes/behavgenom$/Bertie/singleplatequiescence/MaskedVideos/**/*.hdf5'
fnames = glob.glob(mv_dname, recursive=True)
masked_files = [x for x in fnames if not any(x.endswith(ext) for ext in RESERVED_EXT)]
r_dname = '/Volumes/behavgenom$/Bertie/singleplatequiescence/Results/**/*.hdf5'
r_fnames = glob.glob(r_dname, recursive=True)
skeletons_files = [x for x in r_fnames if x.endswith('_skeletons.hdf5')]
#%% check inconsistencies
print('MaskedVideos without skeletons:')
for f in masked_files:
foo = f.replace('MaskedVideos','Results')
foo = foo.replace('.hdf5','_skeletons.hdf5')
if foo not in skeletons_files:
print(f)
print('skeletons without MaskedVideos:')
for f in skeletons_files:
foo = f.replace('Results','MaskedVideos')
foo = foo.replace('_skeletons.hdf5','.hdf5')
if foo not in masked_files:
print(f)
#%%
def check_attrs(fname):
fps_out, microns_per_pixel_out, is_light_background = read_unit_conversions(fname)
if fps_out != (25.0, 25.0, 'seconds') or \
microns_per_pixel_out != (10.0, 'micrometers'):
print('Fix %s' % os.path.basename(fname))
return
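# Note (inferred from check_attrs above, not part of the original script):
# read_unit_conversions returns tuples rather than scalars -- a 3-tuple for
# fps (expected to equal (25.0, 25.0, 'seconds') here) and a 2-tuple for the
# pixel size (expected (10.0, 'micrometers')) -- so the checks compare whole
# tuples at once.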
for i,fname in enumerate(masked_files):
if i<900:
continue
if i%100==0:
print(i)
try:
check_attrs(fname)
except:
print('Failed to check %s' % fname)
for i,fname in enumerate(skeletons_files):
if i%100==0:
print(i)
try:
check_attrs(fname)
except:
print('Failed to check %s' % fname)
#%%
def change_attrs(fname, field_name):
print(os.path.basename(fname))
read_unit_conversions(fname)
with tables.File(fname, 'r+') as fid:
group_to_save = fid.get_node(field_name)
set_unit_conversions(group_to_save,
expected_fps=expected_fps,
microns_per_pixel=microns_per_pixel)
read_unit_conversions(fname)
#for fname in masked_files:
# change_attrs(fname, '/mask')
#for fname in skeletons_files:
# change_attrs(fname, '/trajectories_data') | mit | -8,318,422,337,788,364,000 | 29.153153 | 87 | 0.668261 | false |
h2non/pook | pook/matcher.py | 1 | 1652 | class MatcherEngine(list):
"""
HTTP request matcher engine used by `pook.Mock` to test if an
intercepted outgoing HTTP request must be mocked out or not.
"""
def add(self, matcher):
"""
Adds a new matcher function to the current engine.
Arguments:
matcher (function): matcher function to be added.
"""
self.append(matcher)
def flush(self):
"""
Flushes the current matcher engine, removing all the registered
matcher functions.
"""
self.clear()
def match(self, request):
"""
Match the given HTTP request instance against the registered
matcher functions in the current engine.
Arguments:
request (pook.Request): outgoing request to match.
Returns:
tuple(bool, list[Exception]): ``True`` if all matcher tests
passes, otherwise ``False``. Also returns an optional list
of error exceptions.
"""
errors = []
def match(matcher):
try:
return matcher.match(request)
except Exception as err:
err = '{}: {}'.format(type(matcher).__name__, err)
errors.append(err)
return False
return all([match(matcher) for matcher in self]), errors
def __repr__(self):
"""
Returns an human friendly readable instance data representation.
Returns:
str
"""
matchers = [repr(matcher) for matcher in self]
return 'MatcherEngine([\n {}\n])'.format(',\n '.join(matchers))
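# Usage sketch (not part of the original module). MatcherEngine only assumes
# each registered object exposes match(request); the stub matcher and request
# below are hypothetical stand-ins for pook's real matcher/request classes.
if __name__ == '__main__':
    class StubMatcher(object):
        def __init__(self, path):
            self.path = path
        def match(self, request):
            # Return a boolean verdict; raising here would instead be
            # collected into the errors list by MatcherEngine.match().
            return request.path == self.path
    class StubRequest(object):
        path = '/ping'
    engine = MatcherEngine()
    engine.add(StubMatcher('/ping'))
    matched, errors = engine.match(StubRequest())
    print('matched={} errors={}'.format(matched, errors))  # matched=True errors=[]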
| mit | 755,252,011,123,870,800 | 28.5 | 74 | 0.555085 | false |
kelvinguu/lang2program | strongsup/embeddings.py | 1 | 8329 | import os
from collections import namedtuple
from os.path import join
import numpy as np
from dependency.data_directory import DataDirectory
from gtd.chrono import verboserate
from gtd.ml.vocab import SimpleVocab, SimpleEmbeddings
from gtd.utils import random_seed, cached_property, ComparableMixin
from strongsup.tables.predicate import WikiTablePredicateType, WikiTablePredicate
from strongsup.tables.world import TableWorld
def emulate_distribution(shape, target_samples, seed=None):
m = np.mean(target_samples)
s = np.std(target_samples)
with random_seed(seed):
samples = np.random.normal(m, s, size=shape)
return samples
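# Example (illustrative, not in the original module): sample fresh vectors
# whose mean/std match an existing embedding matrix, deterministically when
# a seed is given:
#   target = GloveEmbeddings(5000).array
#   fresh = emulate_distribution((10, target.shape[1]), target, seed=0)
#   assert fresh.shape == (10, target.shape[1])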
class StaticPredicateEmbeddings(SimpleEmbeddings):
"""All base predicate embeddings are initialized with zero vectors."""
def __init__(self, embed_dim, fixed_predicates):
vocab = ContextualPredicateVocab([ContextualPredicate(pred, None) for pred in fixed_predicates])
array = emulate_distribution((len(vocab), embed_dim), GloveEmbeddings(5000).array, seed=0)
super(StaticPredicateEmbeddings, self).__init__(array, vocab)
class TypeEmbeddings(SimpleEmbeddings):
"""All type embeddings are initialized with zero vectors."""
def __init__(self, embed_dim, all_types):
vocab = SimpleVocab(all_types)
array = emulate_distribution((len(vocab), embed_dim), GloveEmbeddings(5000).array, seed=1)
super(TypeEmbeddings, self).__init__(array, vocab)
class RLongPrimitiveEmbeddings(SimpleEmbeddings):
def __init__(self, embed_dim):
OBJECT = 'object'
LIST = 'list'
tokens = [
OBJECT, LIST,
'r', 'y', 'g', 'o', 'p', 'b', 'e', # 7 colors
'color-na', # if an Alchemy beaker is empty or has multiple colors
# TODO(kelvin): change the behavior of RLongAlchemyObject.color to return `color-na`
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, # 0 index is used to represent things that are not visible
-1,
'X1/1',
'0', '1', '2', '3', '4', # Shapes!
]
vocab = SimpleVocab(tokens)
vocab.OBJECT = OBJECT
vocab.LIST = LIST
array = emulate_distribution((len(vocab), embed_dim), GloveEmbeddings(5000).array, seed=3)
super(RLongPrimitiveEmbeddings, self).__init__(array, vocab)
class UtteranceVocab(SimpleVocab):
"""Vocab for input utterances.
IMPORTANT NOTE: UtteranceVocab is blind to casing! All words are converted to lower-case.
An UtteranceVocab is required to have the following special tokens: UNK, PAD
See class attributes for more info.
"""
UNK = u"<unk>"
PAD = u"<pad>"
SPECIAL_TOKENS = (UNK, PAD)
def __init__(self, tokens):
tokens = [t.lower() for t in tokens]
super(UtteranceVocab, self).__init__(tokens)
# check that all special tokens present
for special in self.SPECIAL_TOKENS:
if special not in self._word2index:
raise ValueError('All special tokens must be present in tokens. Missing {}'.format(special))
def word2index(self, w):
"""Map a word to an integer.
If the word is not known to the vocab, return the index for UNK.
"""
sup = super(UtteranceVocab, self)
try:
return sup.word2index(w.lower())
except KeyError:
return sup.word2index(self.UNK)
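# Example (illustrative): lookups are case-insensitive and unknown words fall
# back to the UNK index instead of raising KeyError:
#   vocab = UtteranceVocab([UtteranceVocab.UNK, UtteranceVocab.PAD, 'table'])
#   assert vocab.word2index('Table') == vocab.word2index('table')
#   assert vocab.word2index('zzz') == vocab.word2index(UtteranceVocab.UNK)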
class GloveEmbeddings(SimpleEmbeddings):
def __init__(self, vocab_size=400000):
"""Load GloveEmbeddings.
Args:
word_vocab_size (int): max # of words in the vocab. If not specified, uses all available GloVe vectors.
Returns:
(np.array, SemgenVocab)
"""
embed_dim = 100
if vocab_size < 5000:
raise ValueError('Need to at least use 5000 words.')
glove_path = join(DataDirectory.glove, 'glove.6B.100d.txt')
download_path = 'http://nlp.stanford.edu/data/glove.6B.zip'
if not os.path.exists(glove_path):
raise RuntimeError('Missing file: {}. Download it here: {}'.format(glove_path, download_path))
# embeddings for special words
words = list(UtteranceVocab.SPECIAL_TOKENS)
num_special = len(words)
embeds = [np.zeros(embed_dim, dtype=np.float32) for _ in words] # zeros are just placeholders for now
with open(glove_path, 'r') as f:
lines = verboserate(f, desc='Loading GloVe embeddings', total=vocab_size, initial=num_special)
for i, line in enumerate(lines, start=num_special):
if i == vocab_size: break
tokens = line.split()
word, embed = tokens[0], np.array([float(tok) for tok in tokens[1:]])
words.append(word)
embeds.append(embed)
vocab = UtteranceVocab(words)
embed_matrix = np.stack(embeds)
special_embeds = emulate_distribution((num_special, embed_dim), embed_matrix[:5000, :], seed=2)
embed_matrix[:num_special, :] = special_embeds
assert embed_matrix.shape[1] == 100
super(GloveEmbeddings, self).__init__(embed_matrix, vocab)
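# Note (illustrative, not part of the original module): the matrix is
# row-aligned with the vocab -- GloveEmbeddings(5000).array has shape
# (5000, 100), and its first rows hold the special UNK/PAD embeddings sampled
# to match the GloVe distribution.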
ContextualPredicate = namedtuple('ContextualPredicate', ['predicate', 'utterance'])
# A predicate paired with the utterance it may be mentioned in.
#
# Args:
# predicate (Predicate)
# utterance (Utterance)
class ContextualPredicateVocab(SimpleVocab):
def __init__(self, tokens):
"""Create Vocab.
Args:
tokens (list[ContextualPredicate]): each token is a (Predicate, Context) pair.
"""
for tok in tokens:
if not isinstance(tok, ContextualPredicate):
raise ValueError("Every token must be a ContextualPredicate.")
super(ContextualPredicateVocab, self).__init__(tokens)
class Vocabs(object):
def __init__(self, utterances, domain):
"""Construct Vocabs.
Args:
utterances (frozenset[Utterance]): a frozenset of Utterance objects
"""
assert isinstance(utterances, frozenset)
self._utterance_set = utterances
self._fixed_predicates = domain.fixed_predicates
self._fixed_predicates_set = set(self._fixed_predicates)
def __hash__(self):
return hash(self._utterance_set)
def __eq__(self, other):
if not isinstance(other, Vocabs):
return False
return self._utterance_set == other._utterance_set
@cached_property
def utterances(self):
tokens = sorted(list(self._utterance_set))
return SimpleVocab(tokens)
def as_contextual_pred(self, pred, utterance):
if self.is_static_pred(pred):
utterance = None
return ContextualPredicate(pred, utterance)
def is_static_pred(self, pred):
return pred in self._fixed_predicates_set
@cached_property
def static_preds(self):
return ContextualPredicateVocab([self.as_contextual_pred(pred, None) for pred in self._fixed_predicates])
@cached_property
def dynamic_preds(self):
tokens = set()
for utterance in self._utterance_set:
for pred in utterance.context.predicates:
if not self.is_static_pred(pred):
tokens.add(self.as_contextual_pred(pred, utterance))
# include all entities in the corresponding table
# TODO(kelvin): improve this hack
world = utterance.context.world
if isinstance(world, TableWorld):
graph = world.graph
rows = graph.all_rows
ent_strs = set()
for col_str in graph.all_columns:
ent_strs.update(graph.reversed_join(col_str, rows))
ents = [WikiTablePredicate(s) for s in ent_strs]
tokens.update([self.as_contextual_pred(e, utterance) for e in ents])
# necessary to ensure a deterministic result
tokens = sorted(list(tokens))
return ContextualPredicateVocab(tokens)
@cached_property
def all_preds(self):
static = self.static_preds
dynamic = self.dynamic_preds
joint_tokens = []
joint_tokens.extend(static.tokens)
joint_tokens.extend(dynamic.tokens)
return ContextualPredicateVocab(joint_tokens)
| apache-2.0 | -25,720,945,844,418,384 | 35.213043 | 115 | 0.629848 | false |
eviljeff/olympia | src/olympia/versions/tests/test_compare.py | 1 | 5402 | # -*- coding: utf-8 -*-
from olympia.versions.compare import (
MAX_VERSION_PART, version_dict, version_int, VersionString)
def test_version_int():
"""Tests that version_int outputs correct integer values."""
assert version_int('3.5.0a1pre2') == 3050000001002
assert version_int('') == 200100
assert version_int('0') == 200100
assert version_int('*') == 65535999999200100
assert version_int('*.0') == 65535000000200100
assert version_int(MAX_VERSION_PART) == 65535000000200100
assert version_int(MAX_VERSION_PART + 1) == 65535000000200100
assert version_int(f'{MAX_VERSION_PART}.100') == 65535990000200100
def test_version_int_compare():
assert version_int('3.6.0.*') == version_int('3.6.0.99')
assert version_int('3.6.*.0') == version_int('3.6.99')
assert version_int('3.6.*') > version_int('3.6.8')
assert version_int('3.6.*') > version_int('3.6.99.98')
assert version_int('*') == version_int('65535.99.99.99')
assert version_int('*.0') == version_int('65535')
assert version_int('98.*') < version_int('*')
assert version_int('5.*.0') == version_int('5.99')
assert version_int('5.*') > version_int('5.0.*')
class TestVersionString():
def test_equality(self):
assert VersionString('3.6.0.0') == VersionString('3.6')
assert VersionString('3.6.*.0') != VersionString('3.6.*')
assert VersionString('*') == VersionString('*.*.*.*')
assert VersionString('*.0.0.0') != VersionString('65535')
assert VersionString('3.6.*') != VersionString('3.6.65535')
assert VersionString('*') != VersionString('65535.65535.65535.65535')
assert VersionString('*') != VersionString('65535.0.0.0')
assert VersionString('3.6a5pre9') != VersionString('3.6')
# edge cases with falsey values
assert VersionString('0') != ''
assert VersionString('') != '0'
assert VersionString('0') is not None
assert VersionString('') is not None
none = None
assert VersionString('0') != none
assert VersionString('') != none
def test_comparison(self):
assert VersionString('3.6.*') > VersionString('3.6.8')
assert VersionString('3.6.*') > VersionString('3.6.65535')
assert VersionString('*') > VersionString('65535.0.0.1')
assert VersionString('*') > VersionString('65536.65536.65536.65536')
assert VersionString('*') > VersionString('98.*')
assert VersionString('98.*') < VersionString('*')
assert VersionString('65534.*') < VersionString('*')
assert VersionString('5.*') > VersionString('5.0.*')
assert VersionString('3.6a5pre9') < VersionString('3.6')
assert VersionString('3.6a5pre9') < VersionString('3.6b1')
assert VersionString('3.6.*') > VersionString('3.6a5pre9')
assert VersionString('99.99999999b1') > VersionString('99.99999998b1')
assert VersionString('99999999.99b1') > VersionString('99999998.99b1')
assert VersionString('*') > VersionString('99999998.99b1')
def test_bool(self):
# bool(VersionString(x)) should behave like bool(x)
assert bool(VersionString('')) is False
assert bool(VersionString('0')) is True
assert bool(VersionString(0)) is True
assert bool(VersionString('false')) is True
assert bool(VersionString('False')) is True
assert bool(VersionString('something')) is True
assert bool(VersionString('3.6.*')) is True
assert bool(VersionString('3.6')) is True
assert bool(VersionString('*')) is True
def test_parts(self):
assert tuple(VersionString('3.6a5pre9').parts) == (
('major', 3),
('minor1', 6),
('minor2', 0),
('minor3', 0),
('alpha', 'a'),
('alpha_ver', 5),
('pre', 'pre'),
('pre_ver', 9),
)
assert tuple(VersionString('3.6.*').parts) == (
('major', 3),
('minor1', 6),
('minor2', '*'),
('minor3', '*'),
('alpha', ''),
('alpha_ver', 0),
('pre', ''),
('pre_ver', 0),
)
def test_part_indexing(self):
vs = VersionString('32.6pre9')
assert vs['major'] == 32
assert vs['minor3'] == 0
assert vs['alpha'] == ''
assert vs['pre'] == 'pre'
assert vs['pre_ver'] == 9
assert vs[0] == '3' # normal string indexing still works
assert vs[3:5] == '6p'
def test_version_dict():
assert version_dict('5.0.*') == (
{'major': 5,
'minor1': 0,
'minor2': 65535,
'minor3': None,
'alpha': None,
'alpha_ver': None,
'pre': None,
'pre_ver': None})
assert version_dict('5.0.*', asterisk_value=1234) == (
{'major': 5,
'minor1': 0,
'minor2': 1234,
'minor3': None,
'alpha': None,
'alpha_ver': None,
'pre': None,
'pre_ver': None})
assert version_dict('*.0.*', asterisk_value='@') == (
{'major': '@',
'minor1': 0,
'minor2': '@',
'minor3': None,
'alpha': None,
'alpha_ver': None,
'pre': None,
'pre_ver': None})
def test_version_int_unicode():
assert version_int(u'\u2322 ugh stephend') == 200100
| bsd-3-clause | -6,424,486,701,864,954,000 | 36.776224 | 78 | 0.553684 | false |
rhyolight/nupic.son | tests/app/soc/modules/seeder/logic/ndb_models.py | 1 | 1269 | # Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ndb model classes for seeder testing."""
from google.appengine.ext import ndb
from melange.appengine import db as db_util
class NdbDummyModel(ndb.Model):
"""A ndb dummy model class for seeder testing."""
boolean = ndb.BooleanProperty(required=True)
name = ndb.StringProperty(required=True)
link = ndb.StringProperty(required=True, validator=db_util.link_validator)
email = ndb.StringProperty(required=True, validator=db_util.email_validator)
numbers = ndb.IntegerProperty(repeated=True)
class NdbKeyProperty(ndb.Model):
"""A ndb model class with KeyProperty for seeder testing."""
name = ndb.StringProperty(required=True)
key = ndb.KeyProperty(required=True)
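# Usage sketch (illustrative; assumes a configured ndb test environment):
#   dummy = NdbDummyModel(boolean=True, name='seed',
#                         link='http://example.com', email='[email protected]',
#                         numbers=[1, 2, 3])
#   dummy.put()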
| apache-2.0 | -8,367,067,377,884,833,000 | 36.323529 | 78 | 0.764381 | false |
JeremyOT/Toto | tests/disabled_test_worker.py | 1 | 9057 | import unittest
import urllib2
import json
import os
import signal
from uuid import uuid4
from toto.secret import *
from multiprocessing import Process, active_children
from toto.worker import TotoWorkerService
from toto.zmqworkerconnection import ZMQWorkerConnection
from tornado.gen import coroutine
from tornado.concurrent import Future
from tornado.ioloop import IOLoop
from time import sleep, time
from threading import Thread
def run_loop(func):
def wrapper():
ioloop = IOLoop()
@coroutine
def looped():
yield func()
ioloop.stop()
ioloop.add_callback(looped)
thread = Thread(target=ioloop.start)
thread.start()
return wrapper
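# Sketch of what the decorator above does (the coroutine below is
# hypothetical): each call spins up a private IOLoop on a worker thread,
# schedules the wrapped coroutine, and stops the loop once it completes.
#   @run_loop
#   @coroutine
#   def demo():
#     yield some_async_call()  # hypothetical awaitable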
def run_server(port, daemon='start'):
  TotoWorkerService(method_module='worker_methods',
                    worker_bind_address='tcp://*:%d' % port,
                    worker_socket_address='ipc:///tmp/workerservice%d.sock' % port,
                    control_socket_address='ipc:///tmp/workercontrol%d.sock' % port,
                    debug=True, daemon=daemon,
                    pidfile='worker-%d.pid' % port).run()
def invoke_synchronously(worker, method, parameters, **kwargs):
resp = []
def cb(response):
resp.append(response)
worker.invoke(method, parameters, callback=cb, **kwargs)
while not resp:
sleep(0.1)
return resp[0]
class TestWorker(unittest.TestCase):
@classmethod
def setUpClass(cls):
print 'Starting worker'
for p in [Process(target=run_server, args=[9001 + i]) for i in xrange(3)]:
p.start()
sleep(0.5)
cls.worker_addresses = ['tcp://127.0.0.1:%d' % (9001 + i) for i in xrange(3)]
cls.worker = ZMQWorkerConnection(cls.worker_addresses[0])
@classmethod
def tearDownClass(cls):
print 'Stopping worker'
for p in [Process(target=run_server, args=[9001 + i, 'stop']) for i in xrange(3)]:
p.start()
sleep(0.5)
def test_method(self):
resp = []
def cb(response):
resp.append(response)
parameters = {'arg1': 1, 'arg2': 'hello'}
self.worker.invoke('return_value', parameters, callback=cb)
while not resp:
sleep(0.1)
self.assertEqual(parameters, resp[0]['parameters'])
def test_method_generator(self):
resp = []
parameters = {'arg1': 1, 'arg2': 'hello'}
@run_loop
@coroutine
def run():
resp.append((yield self.worker.invoke('return_value', parameters, await=True)))
run()
while not resp:
sleep(0.1)
self.assertEqual(parameters, resp[0]['parameters'])
def test_method_alt_invocation(self):
resp = []
def cb(response):
resp.append(response)
parameters = {'arg1': 1, 'arg2': 'hello'}
self.worker.return_value(parameters, callback=cb)
while not resp:
sleep(0.1)
self.assertEqual(parameters, resp[0]['parameters'])
def test_method_alt_invocation_generator(self):
resp = []
parameters = {'arg1': 1, 'arg2': 'hello'}
@run_loop
@coroutine
def run():
resp.append((yield self.worker.return_value(parameters, await=True)))
run()
while not resp:
sleep(0.1)
self.assertEqual(parameters, resp[0]['parameters'])
def test_bad_method(self):
resp = []
def cb(response):
resp.append(response)
parameters = {'arg1': 1, 'arg2': 'hello'}
self.worker.invoke('bad_method', parameters, callback=cb)
while not resp:
sleep(0.1)
self.assertEqual(resp[0]['error']['code'], 1000)
self.assertEqual(resp[0]['error']['value'], "'module' object has no attribute 'bad_method'")
def test_exception(self):
resp = []
def cb(response):
resp.append(response)
parameters = {'arg1': 1, 'arg2': 'hello'}
self.worker.invoke('throw_exception', parameters, callback=cb)
while not resp:
sleep(0.1)
self.assertEqual(resp[0]['error']['code'], 1000)
self.assertEqual(resp[0]['error']['value'], "Test Exception")
def test_exception_generator(self):
resp = []
parameters = {'arg1': 1, 'arg2': 'hello'}
@run_loop
@coroutine
def run():
resp.append((yield self.worker.invoke('throw_exception', parameters, await=True)))
run()
while not resp:
sleep(0.1)
self.assertEqual(resp[0]['error']['code'], 1000)
self.assertEqual(resp[0]['error']['value'], "Test Exception")
def test_toto_exception(self):
resp = []
def cb(response):
resp.append(response)
parameters = {'arg1': 1, 'arg2': 'hello'}
self.worker.invoke('throw_toto_exception', parameters, callback=cb)
while not resp:
sleep(0.1)
self.assertEqual(resp[0]['error']['code'], 4242)
self.assertEqual(resp[0]['error']['value'], "Test Toto Exception")
def test_toto_exception_generator(self):
resp = []
@run_loop
@coroutine
def run():
parameters = {'arg1': 1, 'arg2': 'hello'}
resp.append((yield self.worker.invoke('throw_toto_exception', parameters, await=True)))
run()
while not resp:
sleep(0.1)
self.assertEqual(resp[0]['error']['code'], 4242)
self.assertEqual(resp[0]['error']['value'], "Test Toto Exception")
def test_add_connection(self):
self.worker.add_connection(self.worker_addresses[1])
sleep(0.1)
self.assertTrue(self.worker_addresses[0] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[1] in self.worker.active_connections)
self.worker.add_connection(self.worker_addresses[2])
sleep(0.1)
self.assertTrue(self.worker_addresses[0] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[1] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[2] in self.worker.active_connections)
self.worker.set_connections(self.worker_addresses[:1])
sleep(0.1)
self.assertTrue(self.worker_addresses[0] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[1] not in self.worker.active_connections)
self.assertTrue(self.worker_addresses[2] not in self.worker.active_connections)
self.worker.set_connections(self.worker_addresses)
sleep(0.1)
self.assertTrue(self.worker_addresses[0] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[1] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[2] in self.worker.active_connections)
self.worker.set_connections(self.worker_addresses[2:])
sleep(0.1)
self.assertTrue(self.worker_addresses[0] not in self.worker.active_connections)
self.assertTrue(self.worker_addresses[1] not in self.worker.active_connections)
self.assertTrue(self.worker_addresses[2] in self.worker.active_connections)
self.worker.set_connections(self.worker_addresses[:1])
sleep(0.1)
self.assertTrue(self.worker_addresses[0] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[1] not in self.worker.active_connections)
self.assertTrue(self.worker_addresses[2] not in self.worker.active_connections)
def test_remove_connection(self):
self.worker.set_connections(self.worker_addresses)
sleep(0.1)
self.assertTrue(self.worker_addresses[0] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[1] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[2] in self.worker.active_connections)
self.worker.remove_connection(self.worker_addresses[1])
sleep(0.1)
self.assertTrue(self.worker_addresses[0] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[1] not in self.worker.active_connections)
self.assertTrue(self.worker_addresses[2] in self.worker.active_connections)
self.worker.remove_connection(self.worker_addresses[2])
sleep(0.1)
self.assertTrue(self.worker_addresses[0] in self.worker.active_connections)
self.assertTrue(self.worker_addresses[1] not in self.worker.active_connections)
self.assertTrue(self.worker_addresses[2] not in self.worker.active_connections)
def test_remote_messaging(self):
self.worker.set_connections(self.worker_addresses)
sleep(0.1)
worker_ids = list()
for i in xrange(3):
self.worker.return_pid(callback=lambda response: worker_ids.append(response['pid']))
while len(worker_ids) < 3:
sleep(0.1)
self.assertEqual(len(set(worker_ids)), 3)
self.worker.set_connections(self.worker_addresses[:1])
sleep(0.1)
worker_ids = list()
for i in xrange(3):
self.worker.return_pid(callback=lambda response: worker_ids.append(response['pid']))
while len(worker_ids) < 3:
sleep(0.1)
self.assertEqual(len(set(worker_ids)), 1)
def test_worker_routing(self):
self.worker.set_connections(self.worker_addresses)
sleep(0.1)
worker_ids = list()
for i in xrange(30):
sleep(0.01)
self.worker.return_pid(callback=lambda response: worker_ids.append(response['pid']))
while len(worker_ids) < 30:
sleep(0.1)
self.worker.set_connections(self.worker_addresses[:1])
sleep(0.1)
order = (worker_ids[0], worker_ids[1], worker_ids[2])
self.assertEqual(len(set(order)), len(order))
    for i in xrange(3, 30, 3):
      self.assertSequenceEqual(order, worker_ids[i:i+3])
| mit | -187,180,703,617,007,360 | 36.118852 | 279 | 0.685105 | false |
111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_CISCO_MPLS_LSR_EXT_STD_MIB.py | 1 | 5154 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'CiscoMplsLsrExtStdMib.Cmplsxcexttable.Cmplsxcextentry' : {
'meta_info' : _MetaInfoClass('CiscoMplsLsrExtStdMib.Cmplsxcexttable.Cmplsxcextentry',
False,
[
_MetaInfoClassMember('mplsXCIndex', ATTRIBUTE, 'str' , None, None,
[(1, 24)], [],
''' ''',
'mplsxcindex',
'CISCO-MPLS-LSR-EXT-STD-MIB', True),
_MetaInfoClassMember('mplsXCInSegmentIndex', ATTRIBUTE, 'str' , None, None,
[(1, 24)], [],
''' ''',
'mplsxcinsegmentindex',
'CISCO-MPLS-LSR-EXT-STD-MIB', True),
_MetaInfoClassMember('mplsXCOutSegmentIndex', ATTRIBUTE, 'str' , None, None,
[(1, 24)], [],
''' ''',
'mplsxcoutsegmentindex',
'CISCO-MPLS-LSR-EXT-STD-MIB', True),
_MetaInfoClassMember('cmplsXCExtTunnelPointer', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-1](\\.[1-3]?[0-9]))|(2\\.(0|([1-9]\\d*))))(\\.(0|([1-9]\\d*)))*'],
''' This object indicates the back pointer to the tunnel
entry segment. This object cannot be modified if
mplsXCRowStatus for the corresponding entry in the
mplsXCTable is active(1).
''',
'cmplsxcexttunnelpointer',
'CISCO-MPLS-LSR-EXT-STD-MIB', False),
_MetaInfoClassMember('cmplsXCOppositeDirXCPtr', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-1](\\.[1-3]?[0-9]))|(2\\.(0|([1-9]\\d*))))(\\.(0|([1-9]\\d*)))*'],
''' This object indicates the pointer to the opposite
direction XC entry. This object cannot be modified if
mplsXCRowStatus for the corresponding entry in the
mplsXCTable is active(1).
''',
'cmplsxcoppositedirxcptr',
'CISCO-MPLS-LSR-EXT-STD-MIB', False),
],
'CISCO-MPLS-LSR-EXT-STD-MIB',
'cmplsXCExtEntry',
_yang_ns._namespaces['CISCO-MPLS-LSR-EXT-STD-MIB'],
'ydk.models.cisco_ios_xe.CISCO_MPLS_LSR_EXT_STD_MIB'
),
},
'CiscoMplsLsrExtStdMib.Cmplsxcexttable' : {
'meta_info' : _MetaInfoClass('CiscoMplsLsrExtStdMib.Cmplsxcexttable',
False,
[
_MetaInfoClassMember('cmplsXCExtEntry', REFERENCE_LIST, 'Cmplsxcextentry' , 'ydk.models.cisco_ios_xe.CISCO_MPLS_LSR_EXT_STD_MIB', 'CiscoMplsLsrExtStdMib.Cmplsxcexttable.Cmplsxcextentry',
[], [],
''' An entry in this table extends the cross connect
information represented by an entry in
the mplsXCTable in MPLS-LSR-STD-MIB [RFC3813] through
a sparse augmentation. An entry can be created by
a network administrator via SNMP SET commands, or in
response to signaling protocol events.
''',
'cmplsxcextentry',
'CISCO-MPLS-LSR-EXT-STD-MIB', False),
],
'CISCO-MPLS-LSR-EXT-STD-MIB',
'cmplsXCExtTable',
_yang_ns._namespaces['CISCO-MPLS-LSR-EXT-STD-MIB'],
'ydk.models.cisco_ios_xe.CISCO_MPLS_LSR_EXT_STD_MIB'
),
},
'CiscoMplsLsrExtStdMib' : {
'meta_info' : _MetaInfoClass('CiscoMplsLsrExtStdMib',
False,
[
_MetaInfoClassMember('cmplsXCExtTable', REFERENCE_CLASS, 'Cmplsxcexttable' , 'ydk.models.cisco_ios_xe.CISCO_MPLS_LSR_EXT_STD_MIB', 'CiscoMplsLsrExtStdMib.Cmplsxcexttable',
[], [],
''' This table sparse augments the mplsXCTable of
MPLS-LSR-STD-MIB [RFC3813] to provide MPLS-TP specific
information about associated tunnel information
''',
'cmplsxcexttable',
'CISCO-MPLS-LSR-EXT-STD-MIB', False),
],
'CISCO-MPLS-LSR-EXT-STD-MIB',
'CISCO-MPLS-LSR-EXT-STD-MIB',
_yang_ns._namespaces['CISCO-MPLS-LSR-EXT-STD-MIB'],
'ydk.models.cisco_ios_xe.CISCO_MPLS_LSR_EXT_STD_MIB'
),
},
}
_meta_table['CiscoMplsLsrExtStdMib.Cmplsxcexttable.Cmplsxcextentry']['meta_info'].parent =_meta_table['CiscoMplsLsrExtStdMib.Cmplsxcexttable']['meta_info']
_meta_table['CiscoMplsLsrExtStdMib.Cmplsxcexttable']['meta_info'].parent =_meta_table['CiscoMplsLsrExtStdMib']['meta_info']
| apache-2.0 | 7,973,730,667,618,890,000 | 49.038835 | 199 | 0.557431 | false |
seagatesoft/dateparser | tests/test_freshness_date_parser.py | 1 | 19130 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import six
import unittest
from datetime import datetime, timedelta, date, time
from functools import wraps
from dateutil.relativedelta import relativedelta
from mock import Mock, patch
from nose_parameterized import parameterized, param
from dateparser.date import DateDataParser, freshness_date_parser
from tests import BaseTestCase
class TestFreshnessDateDataParser(BaseTestCase):
def setUp(self):
super(TestFreshnessDateDataParser, self).setUp()
self.now = datetime(2014, 9, 1, 10, 30)
self.date_string = NotImplemented
self.parser = NotImplemented
self.result = NotImplemented
self.freshness_parser = NotImplemented
self.freshness_result = NotImplemented
self.date = NotImplemented
self.time = NotImplemented
@parameterized.expand([
# English dates
param('yesterday', ago={'days': 1}, period='day'),
param('the day before yesterday', ago={'days': 2}, period='day'),
param('today', ago={'days': 0}, period='day'),
param('an hour ago', ago={'hours': 1}, period='day'),
param('about an hour ago', ago={'hours': 1}, period='day'),
param('a day ago', ago={'days': 1}, period='day'),
param('a week ago', ago={'weeks': 1}, period='week'),
param('one week ago', ago={'weeks': 1}, period='week'),
param('2 hours ago', ago={'hours': 2}, period='day'),
param('about 23 hours ago', ago={'hours': 23}, period='day'),
param('1 year 2 months', ago={'years': 1, 'months': 2}, period='month'),
param('1 year, 09 months,01 weeks', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('1 year 11 months', ago={'years': 1, 'months': 11}, period='month'),
param('1 year 12 months', ago={'years': 1, 'months': 12}, period='month'),
param('15 hr', ago={'hours': 15}, period='day'),
param('15 hrs', ago={'hours': 15}, period='day'),
param('2 min', ago={'minutes': 2}, period='day'),
param('2 mins', ago={'minutes': 2}, period='day'),
param('3 sec', ago={'seconds': 3}, period='day'),
param('1000 years ago', ago={'years': 1000}, period='year'),
param('2013 years ago', ago={'years': 2013}, period='year'), # We've fixed .now in setUp
param('5000 months ago', ago={'years': 416, 'months': 8}, period='month'),
param('{} months ago'.format(2013 * 12 + 8), ago={'years': 2013, 'months': 8}, period='month'),
param('1 year, 1 month, 1 week, 1 day, 1 hour and 1 minute ago',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
        param('just now', ago={'seconds': 0}, period='day'),
# French dates
param("Aujourd'hui", ago={'days': 0}, period='day'),
param("Hier", ago={'days': 1}, period='day'),
param("Avant-hier", ago={'days': 2}, period='day'),
param('Il ya un jour', ago={'days': 1}, period='day'),
param('Il ya une heure', ago={'hours': 1}, period='day'),
param('Il ya 2 heures', ago={'hours': 2}, period='day'),
param('Il ya environ 23 heures', ago={'hours': 23}, period='day'),
param('1 an 2 mois', ago={'years': 1, 'months': 2}, period='month'),
param('1 année, 09 mois, 01 semaines', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('1 an 11 mois', ago={'years': 1, 'months': 11}, period='month'),
param('Il ya 1 an, 1 mois, 1 semaine, 1 jour, 1 heure et 1 minute',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
param('Il y a 40 min', ago={'minutes': 40}, period='day'),
# German dates
param('Heute', ago={'days': 0}, period='day'),
param('Gestern', ago={'days': 1}, period='day'),
param('vorgestern', ago={'days': 2}, period='day'),
param('vor einem Tag', ago={'days': 1}, period='day'),
param('vor einer Stunden', ago={'hours': 1}, period='day'),
param('Vor 2 Stunden', ago={'hours': 2}, period='day'),
param('Vor 2 Stunden', ago={'hours': 2}, period='day'),
param('vor etwa 23 Stunden', ago={'hours': 23}, period='day'),
param('1 Jahr 2 Monate', ago={'years': 1, 'months': 2}, period='month'),
param('1 Jahr, 09 Monate, 01 Wochen', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('1 Jahr 11 Monate', ago={'years': 1, 'months': 11}, period='month'),
param('vor 29h', ago={'hours': 29}, period='day'),
param('vor 29m', ago={'minutes': 29}, period='day'),
param('1 Jahr, 1 Monat, 1 Woche, 1 Tag, 1 Stunde und 1 Minute',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
# Italian dates
param('oggi', ago={'days': 0}, period='day'),
param('ieri', ago={'days': 1}, period='day'),
param('2 ore fa', ago={'hours': 2}, period='day'),
param('circa 23 ore fa', ago={'hours': 23}, period='day'),
param('1 anno 2 mesi', ago={'years': 1, 'months': 2}, period='month'),
param('1 anno, 09 mesi, 01 settimane', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('1 anno 11 mesi', ago={'years': 1, 'months': 11}, period='month'),
param('1 anno, 1 mese, 1 settimana, 1 giorno, 1 ora e 1 minuto fa',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
# Portuguese dates
param('ontem', ago={'days': 1}, period='day'),
param('anteontem', ago={'days': 2}, period='day'),
param('hoje', ago={'days': 0}, period='day'),
param('uma hora atrás', ago={'hours': 1}, period='day'),
param('um dia atrás', ago={'days': 1}, period='day'),
param('uma semana atrás', ago={'weeks': 1}, period='week'),
param('2 horas atrás', ago={'hours': 2}, period='day'),
param('cerca de 23 horas atrás', ago={'hours': 23}, period='day'),
param('1 ano 2 meses', ago={'years': 1, 'months': 2}, period='month'),
param('1 ano, 09 meses, 01 semanas', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('1 ano 11 meses', ago={'years': 1, 'months': 11}, period='month'),
param('1 ano, 1 mês, 1 semana, 1 dia, 1 hora e 1 minuto atrás',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
# Turkish dates
param('Dün', ago={'days': 1}, period='day'),
param('Bugün', ago={'days': 0}, period='day'),
param('2 saat önce', ago={'hours': 2}, period='day'),
param('yaklaşık 23 saat önce', ago={'hours': 23}, period='day'),
param('1 yıl 2 ay', ago={'years': 1, 'months': 2}, period='month'),
param('1 yıl, 09 ay, 01 hafta', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('1 yıl 11 ay', ago={'years': 1, 'months': 11}, period='month'),
param('1 yıl, 1 ay, 1 hafta, 1 gün, 1 saat ve 1 dakika önce',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
# Russian dates
param('сегодня', ago={'days': 0}, period='day'),
param('Вчера в', ago={'days': 1}, period='day'),
param('вчера', ago={'days': 1}, period='day'),
param('2 часа назад', ago={'hours': 2}, period='day'),
param('час назад', ago={'hours': 1}, period='day'),
param('минуту назад', ago={'minutes': 1}, period='day'),
param('2 ч. 21 мин. назад', ago={'hours': 2, 'minutes': 21}, period='day'),
param('около 23 часов назад', ago={'hours': 23}, period='day'),
param('1 год 2 месяца', ago={'years': 1, 'months': 2}, period='month'),
param('1 год, 09 месяцев, 01 недель', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('1 год 11 месяцев', ago={'years': 1, 'months': 11}, period='month'),
param('1 год, 1 месяц, 1 неделя, 1 день, 1 час и 1 минуту назад',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
# Czech dates
param('Dnes', ago={'days': 0}, period='day'),
param('Včera', ago={'days': 1}, period='day'),
param('Předevčírem', ago={'days': 2}, period='day'),
param('Před 2 hodinami', ago={'hours': 2}, period='day'),
param('před přibližně 23 hodin', ago={'hours': 23}, period='day'),
param('1 rok 2 měsíce', ago={'years': 1, 'months': 2}, period='month'),
param('1 rok, 09 měsíců, 01 týdnů', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('1 rok 11 měsíců', ago={'years': 1, 'months': 11}, period='month'),
param('3 dny', ago={'days': 3}, period='day'),
param('3 hodiny', ago={'hours': 3}, period='day'),
param('1 rok, 1 měsíc, 1 týden, 1 den, 1 hodina, 1 minuta před',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
# Spanish dates
param('anteayer', ago={'days': 2}, period='day'),
param('ayer', ago={'days': 1}, period='day'),
param('hoy', ago={'days': 0}, period='day'),
param('hace una hora', ago={'hours': 1}, period='day'),
param('Hace un día', ago={'days': 1}, period='day'),
param('Hace una semana', ago={'weeks': 1}, period='week'),
param('Hace 2 horas', ago={'hours': 2}, period='day'),
param('Hace cerca de 23 horas', ago={'hours': 23}, period='day'),
param('1 año 2 meses', ago={'years': 1, 'months': 2}, period='month'),
param('1 año, 09 meses, 01 semanas', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('1 año 11 meses', ago={'years': 1, 'months': 11}, period='month'),
param('Hace 1 año, 1 mes, 1 semana, 1 día, 1 hora y 1 minuto',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
# Chinese dates
param('昨天', ago={'days': 1}, period='day'),
param('前天', ago={'days': 2}, period='day'),
param('2小时前', ago={'hours': 2}, period='day'),
param('约23小时前', ago={'hours': 23}, period='day'),
param('1年2个月', ago={'years': 1, 'months': 2}, period='month'),
param('1年09月,01周', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('1年11个月', ago={'years': 1, 'months': 11}, period='month'),
param('1年,1月,1周,1天,1小时,1分钟前',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
# Arabic dates
param('اليوم', ago={'days': 0}, period='day'),
param('يوم أمس', ago={'days': 1}, period='day'),
param('منذ يومين', ago={'days': 2}, period='day'),
param('منذ 3 أيام', ago={'days': 3}, period='day'),
param('منذ 21 أيام', ago={'days': 21}, period='day'),
param('1 عام, 1 شهر, 1 أسبوع, 1 يوم, 1 ساعة, 1 دقيقة',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
# Thai dates
param('วันนี้', ago={'days': 0}, period='day'),
param('เมื่อวานนี้', ago={'days': 1}, period='day'),
param('2 วัน', ago={'days': 2}, period='day'),
param('2 ชั่วโมง', ago={'hours': 2}, period='day'),
param('23 ชม.', ago={'hours': 23}, period='day'),
param('2 สัปดาห์ 3 วัน', ago={'weeks': 2, 'days': 3}, period='day'),
param('1 ปี 9 เดือน 1 สัปดาห์', ago={'years': 1, 'months': 9, 'weeks': 1},
period='week'),
param('1 ปี 1 เดือน 1 สัปดาห์ 1 วัน 1 ชั่วโมง 1 นาที',
ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
period='day'),
# Vietnamese dates
param('Hôm nay', ago={'days': 0}, period='day'),
param('Hôm qua', ago={'days': 1}, period='day'),
param('2 giờ', ago={'hours': 2}, period='day'),
param('2 tuần 3 ngày', ago={'weeks': 2, 'days': 3}, period='day'),
# following test unsupported, refer to discussion at:
# http://github.com/scrapinghub/dateparser/issues/33
#param('1 năm 1 tháng 1 tuần 1 ngày 1 giờ 1 chút',
# ago={'years': 1, 'months': 1, 'weeks': 1, 'days': 1, 'hours': 1, 'minutes': 1},
# period='day'),
# Belarusian dates
param('сёння', ago={'days': 0}, period='day'),
param('учора ў', ago={'days': 1}, period='day'),
param('ўчора', ago={'days': 1}, period='day'),
param('пазаўчора', ago={'days': 2}, period='day'),
param('2 гадзіны таму назад', ago={'hours': 2}, period='day'),
param('2 гадзіны таму', ago={'hours': 2}, period='day'),
param('гадзіну назад', ago={'hours': 1}, period='day'),
param('хвіліну таму', ago={'minutes': 1}, period='day'),
param('2 гадзіны 21 хвіл. назад', ago={'hours': 2, 'minutes': 21}, period='day'),
param('каля 23 гадзін назад', ago={'hours': 23}, period='day'),
param('1 год 2 месяцы', ago={'years': 1, 'months': 2}, period='month'),
param('1 год, 09 месяцаў, 01 тыдзень', ago={'years': 1, 'months': 9, 'weeks': 1}, period='week'),
param('2 гады 3 месяцы', ago={'years': 2, 'months': 3}, period='month'),
param('5 гадоў, 1 месяц, 6 тыдняў, 3 дні, 5 гадзін 1 хвіліну і 3 секунды таму назад',
ago={'years': 5, 'months': 1, 'weeks': 6, 'days': 3, 'hours': 5, 'minutes': 1, 'seconds': 3},
period='day'),
])
def test_relative_dates(self, date_string, ago, period):
self.given_parser()
self.given_date_string(date_string)
self.when_date_is_parsed()
self.then_error_was_not_raised()
self.then_date_was_parsed_by_freshness_parser()
self.then_date_obj_is_exactly_this_time_ago(ago)
self.then_period_is(period)
@parameterized.expand([
param('15th of Aug, 2014 Diane Bennett'),
])
def test_insane_dates(self, date_string):
self.given_parser()
self.given_date_string(date_string)
self.when_date_is_parsed()
self.then_error_was_not_raised()
self.then_date_was_not_parsed()
@parameterized.expand([
param('5000 years ago'),
param('2014 years ago'), # We've fixed .now in setUp
param('{} months ago'.format(2013 * 12 + 9)),
])
def test_dates_not_supported_by_date_time(self, date_string):
self.given_parser()
self.given_date_string(date_string)
self.when_date_is_parsed()
self.then_error_was_raised(ValueError, ['year is out of range',
"('year must be in 1..9999'"])
@parameterized.expand([
param('несколько секунд назад', boundary={'seconds': 45}, period='day'),
param('há alguns segundos', boundary={'seconds': 45}, period='day'),
])
def test_inexplicit_dates(self, date_string, boundary, period):
self.given_parser()
self.given_date_string(date_string)
self.when_date_is_parsed()
self.then_error_was_not_raised()
self.then_date_was_parsed_by_freshness_parser()
self.then_period_is(period)
self.then_date_obj_is_between(self.now - timedelta(**boundary), self.now)
@parameterized.expand([
param('Today at 9 pm', date(2014, 9, 1), time(21, 0)),
param('Today at 11:20 am', date(2014, 9, 1), time(11, 20)),
param('Yesterday 1:20 pm', date(2014, 8, 31), time(13, 20)),
param('the day before yesterday 16:50', date(2014, 8, 30), time(16, 50)),
param('2 Tage 18:50', date(2014, 8, 30), time(18, 50)),
param('1 day ago at 2 PM', date(2014, 8, 31), time(14, 0)),
param('Dnes v 12:40', date(2014, 9, 1), time(12, 40)),
])
def test_freshness_date_with_time(self, date_string, date, time):
self.given_parser()
self.given_date_string(date_string)
self.when_date_is_parsed()
self.then_date_is(date)
self.then_time_is(time)
def given_date_string(self, date_string):
self.date_string = date_string
def given_parser(self):
self.add_patch(patch.object(freshness_date_parser, 'now', self.now))
def collecting_get_date_data(get_date_data):
@wraps(get_date_data)
def wrapped(date_string):
self.freshness_result = get_date_data(date_string)
return self.freshness_result
return wrapped
self.add_patch(patch.object(freshness_date_parser,
'get_date_data',
collecting_get_date_data(freshness_date_parser.get_date_data)))
self.freshness_parser = Mock(wraps=freshness_date_parser)
self.add_patch(patch('dateparser.date.freshness_date_parser', new=self.freshness_parser))
self.parser = DateDataParser()
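    # The patches above capture the sub-parser's raw output: get_date_data is
    # wrapped so every call also records its return value in
    # self.freshness_result, which then_date_was_parsed_by_freshness_parser
    # later compares against the DateDataParser result.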
def when_date_is_parsed(self):
try:
self.result = self.parser.get_date_data(self.date_string)
except Exception as error:
self.error = error
def then_date_is(self, date):
self.assertEqual(date, self.result['date_obj'].date())
def then_time_is(self, time):
self.assertEqual(time, self.result['date_obj'].time())
def then_period_is(self, period):
self.assertEqual(period, self.result['period'])
def then_date_obj_is_between(self, low_boundary, high_boundary):
self.assertGreater(self.result['date_obj'], low_boundary)
self.assertLess(self.result['date_obj'], high_boundary)
def then_date_obj_is_exactly_this_time_ago(self, ago):
self.assertEqual(self.now - relativedelta(**ago), self.result['date_obj'])
def then_date_was_not_parsed(self):
self.assertIsNone(self.result['date_obj'], '"%s" should not be parsed' % self.date_string)
def then_date_was_parsed_by_freshness_parser(self):
self.assertEqual(self.result, self.freshness_result)
def then_error_was_not_raised(self):
self.assertEqual(NotImplemented, self.error)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -581,672,396,525,162,600 | 50.619048 | 107 | 0.548567 | false |
jasondunsmore/heat | heat/engine/resources/openstack/nova/flavor.py | 1 | 5017 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class NovaFlavor(resource.Resource):
"""A resource for creating OpenStack virtual hardware templates.
Due to default nova security policy usage of this resource is limited to
being used by administrators only. The rights may also be delegated to
other users by redefining the access controls on the nova-api server.
Note that the current implementation of the Nova Flavor resource does not
allow specifying the name and flavorid properties for the resource.
This is done to avoid potential naming collision upon flavor creation as
all flavor have a global scope.
"""
support_status = support.SupportStatus(version='2014.2')
default_client_name = 'nova'
required_service_extension = 'os-flavor-manage'
entity = 'flavors'
PROPERTIES = (
RAM, VCPUS, DISK, SWAP, EPHEMERAL,
RXTX_FACTOR, EXTRA_SPECS, IS_PUBLIC
) = (
'ram', 'vcpus', 'disk', 'swap', 'ephemeral',
'rxtx_factor', 'extra_specs', 'is_public',
)
ATTRIBUTES = (
IS_PUBLIC_ATTR,
) = (
'is_public',
)
properties_schema = {
RAM: properties.Schema(
properties.Schema.INTEGER,
_('Memory in MB for the flavor.'),
required=True
),
VCPUS: properties.Schema(
properties.Schema.INTEGER,
_('Number of VCPUs for the flavor.'),
required=True
),
DISK: properties.Schema(
properties.Schema.INTEGER,
_('Size of local disk in GB. The "0" size is a special case that '
'uses the native base image size as the size of the ephemeral '
'root volume.'),
default=0
),
SWAP: properties.Schema(
properties.Schema.INTEGER,
_('Swap space in MB.'),
default=0
),
EPHEMERAL: properties.Schema(
properties.Schema.INTEGER,
_('Size of a secondary ephemeral data disk in GB.'),
default=0
),
RXTX_FACTOR: properties.Schema(
properties.Schema.NUMBER,
_('RX/TX factor.'),
default=1.0
),
EXTRA_SPECS: properties.Schema(
properties.Schema.MAP,
_('Key/Value pairs to extend the capabilities of the flavor.'),
update_allowed=True,
),
IS_PUBLIC: properties.Schema(
properties.Schema.BOOLEAN,
_('Scope of flavor accessibility. Public or private. '
'Default value is True, means public, shared '
'across all projects.'),
default=True,
support_status=support.SupportStatus(version='6.0.0'),
),
}
attributes_schema = {
IS_PUBLIC_ATTR: attributes.Schema(
_('Whether the flavor is shared across all projects.'),
support_status=support.SupportStatus(version='6.0.0'),
type=attributes.Schema.BOOLEAN
),
}
def handle_create(self):
args = dict(self.properties)
args['flavorid'] = 'auto'
args['name'] = self.physical_resource_name()
flavor_keys = args.pop(self.EXTRA_SPECS)
flavor = self.client().flavors.create(**args)
self.resource_id_set(flavor.id)
if flavor_keys:
flavor.set_keys(flavor_keys)
tenant = self.stack.context.tenant_id
if not args['is_public']:
# grant access only to the active project(private flavor)
self.client().flavor_access.add_tenant_access(flavor, tenant)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Update nova flavor."""
if self.EXTRA_SPECS in prop_diff:
flavor = self.client().flavors.get(self.resource_id)
old_keys = flavor.get_keys()
flavor.unset_keys(old_keys)
new_keys = prop_diff.get(self.EXTRA_SPECS)
if new_keys is not None:
flavor.set_keys(new_keys)
def _resolve_attribute(self, name):
flavor = self.client().flavors.get(self.resource_id)
if name == self.IS_PUBLIC_ATTR:
return getattr(flavor, name)
def resource_mapping():
return {
'OS::Nova::Flavor': NovaFlavor
}
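# Example (illustrative, not part of the original module) of declaring this
# resource in a HOT template; all property values below are arbitrary:
#
#   resources:
#     private_flavor:
#       type: OS::Nova::Flavor
#       properties:
#         ram: 512
#         vcpus: 1
#         disk: 1
#         is_public: false
#         extra_specs: {"key": "value"}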
| apache-2.0 | 4,630,381,481,356,611,000 | 32.898649 | 78 | 0.606936 | false |
Lokke/eden | modules/s3db/cap.py | 1 | 110403 | # -*- coding: utf-8 -*-
""" Sahana Eden Common Alerting Protocol (CAP) Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3CAPModel",
"S3CAPAreaNameModel",
"cap_info_labels",
"cap_alert_is_template",
"cap_rheader",
"cap_alert_list_layout",
"add_area_from_template",
"cap_AssignArea",
"cap_AreaRepresent",
#"cap_gis_location_xml_post_parse",
#"cap_gis_location_xml_post_render",
)
import datetime
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
from ..s3 import *
# =============================================================================
class S3CAPModel(S3Model):
"""
CAP: Common Alerting Protocol
- this module is a non-functional stub
http://eden.sahanafoundation.org/wiki/BluePrint/Messaging#CAP
"""
names = ("cap_alert",
"cap_alert_represent",
"cap_warning_priority",
"cap_info",
"cap_info_represent",
"cap_resource",
"cap_area",
"cap_area_id",
"cap_area_represent",
"cap_area_location",
"cap_area_tag",
"cap_info_category_opts",
"cap_template_represent",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
# ---------------------------------------------------------------------
# List of Incident Categories -- copied from irs module <--
# @ToDo: Switch to using event_incident_type
#
# The keys are based on the Canadian ems.incident hierarchy, with a
# few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed
# as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
cap_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
# http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.amberAlert": T("Child Abduction Emergency"),
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
# http://en.wikipedia.org/wiki/Silver_Alert
"missingPerson.silver": T("Missing Senior Citizen"),
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
# ---------------------------------------------------------------------
# CAP alerts
#
# CAP alert Status Code (status)
cap_alert_status_code_opts = OrderedDict([
("Actual", T("Actual - actionable by all targeted recipients")),
("Exercise", T("Exercise - only for designated participants (decribed in note)")),
("System", T("System - for internal functions")),
("Test", T("Test - testing, all recipients disregard")),
("Draft", T("Draft - not actionable in its current form")),
])
# CAP alert message type (msgType)
cap_alert_msgType_code_opts = OrderedDict([
("Alert", T("Alert: Initial information requiring attention by targeted recipients")),
("Update", T("Update: Update and supercede earlier message(s)")),
("Cancel", T("Cancel: Cancel earlier message(s)")),
("Ack", T("Ack: Acknowledge receipt and acceptance of the message(s)")),
("Error", T("Error: Indicate rejection of the message(s)")),
])
# CAP alert scope
cap_alert_scope_code_opts = OrderedDict([
("Public", T("Public - unrestricted audiences")),
("Restricted", T("Restricted - to users with a known operational requirement (described in restriction)")),
("Private", T("Private - only to specified addresses (mentioned as recipients)"))
])
# CAP info categories
cap_info_category_opts = OrderedDict([
("Geo", T("Geophysical (inc. landslide)")),
("Met", T("Meteorological (inc. flood)")),
("Safety", T("General emergency and public safety")),
("Security", T("Law enforcement, military, homeland and local/private security")),
("Rescue", T("Rescue and recovery")),
("Fire", T("Fire suppression and rescue")),
("Health", T("Medical and public health")),
("Env", T("Pollution and other environmental")),
("Transport", T("Public and private transportation")),
("Infra", T("Utility, telecommunication, other non-transport infrastructure")),
("CBRNE", T("Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack")),
("Other", T("Other events")),
])
tablename = "cap_alert"
define_table(tablename,
Field("is_template", "boolean",
readable = False,
writable = True,
),
Field("template_id", "reference cap_alert",
label = T("Template"),
ondelete = "RESTRICT",
represent = self.cap_template_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
self.cap_template_represent,
filterby="is_template",
filter_opts=(True,)
)),
comment = T("Apply a template"),
),
Field("template_title",
label = T("Template Title"),
),
Field("template_settings", "text",
default = "{}",
readable = False,
),
Field("identifier", unique=True, length=128,
default = self.generate_identifier,
label = T("Identifier"),
),
Field("sender",
label = T("Sender"),
default = self.generate_sender,
# @todo: can not be empty in alerts (validator!)
),
s3_datetime("sent",
default = "now",
writable = False,
),
Field("status",
default = "Draft",
label = T("Status"),
represent = lambda opt: \
cap_alert_status_code_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(cap_alert_status_code_opts),
),
Field("msg_type",
label = T("Message Type"),
represent = lambda opt: \
cap_alert_msgType_code_opts.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_msgType_code_opts)
),
),
Field("source",
label = T("Source"),
default = self.generate_source,
),
Field("scope",
label = T("Scope"),
represent = lambda opt: \
cap_alert_scope_code_opts.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_scope_code_opts)
),
),
# Text describing the restriction for scope=restricted
Field("restriction", "text",
label = T("Restriction"),
),
Field("addresses", "list:string",
label = T("Recipients"),
represent = self.list_string_represent,
#@ToDo: provide a better way to add multiple addresses,
# do not ask the user to delimit it themselves
# this should eventually use the CAP contacts
#widget = S3CAPAddressesWidget,
),
Field("codes", "list:string",
default = settings.get_cap_codes(),
label = T("Codes"),
represent = self.list_string_represent,
),
Field("note", "text",
label = T("Note"),
),
Field("reference", "list:reference cap_alert",
label = T("Reference"),
represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ",
multiple = True,
),
# @ToDo: This should not be manually entered,
# needs a widget
#widget = S3ReferenceWidget(table,
# one_to_many=True,
# allow_create=False),
),
# @ToDo: Switch to using event_incident_type_id
Field("incidents", "list:string",
label = T("Incidents"),
represent = S3Represent(options = cap_incident_type_opts,
multiple = True),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_incident_type_opts,
multiple = True,
sort = True,
)),
widget = S3MultiSelectWidget(selectedList = 10),
),
# approved_on field for recording when the alert was approved
s3_datetime("approved_on",
readable = False,
writable = False,
),
*s3_meta_fields())
list_fields = [(T("Sent"), "sent"),
"scope",
"info.priority",
"info.event_type_id",
"info.sender_name",
"area.name",
]
notify_fields = [(T("Identifier"), "identifier"),
(T("Date"), "sent"),
(T("Status"), "status"),
(T("Message Type"), "msg_type"),
(T("Source"), "source"),
(T("Scope"), "scope"),
(T("Restriction"), "restriction"),
(T("Category"), "info.category"),
(T("Event"), "info.event_type_id"),
(T("Response type"), "info.response_type"),
(T("Priority"), "info.priority"),
(T("Urgency"), "info.urgency"),
(T("Severity"), "info.severity"),
(T("Certainty"), "info.certainty"),
(T("Effective"), "info.effective"),
(T("Expires at"), "info.expires"),
(T("Sender's name"), "info.sender_name"),
(T("Headline"), "info.headline"),
(T("Description"), "info.description"),
(T("Instruction"), "info.instruction"),
(T("Contact information"), "info.contact"),
(T("URL"), "info.web"),
(T("Area Description"), "area.name"),
]
filter_widgets = [
# @ToDo: Radio Button to choose between alert expired, unexpired and all
S3TextFilter(["identifier",
"sender",
"incidents",
"cap_info.headline",
"cap_info.event",
],
label = T("Search"),
comment = T("Search for an Alert by sender, incident, headline or event."),
),
S3OptionsFilter("info.category",
label = T("Category"),
options = cap_info_category_opts,
),
S3OptionsFilter("info.event_type_id",
),
S3OptionsFilter("info.priority",
),
S3LocationFilter("location.location_id",
label = T("Location(s)"),
# options = gis.get_countries().keys(),
),
S3OptionsFilter("info.language",
label = T("Language"),
),
]
configure(tablename,
context = {"location": "location.location_id",
},
filter_widgets = filter_widgets,
list_fields = list_fields,
list_layout = cap_alert_list_layout,
list_orderby = "cap_info.expires desc",
notify_fields = notify_fields,
onvalidation = self.cap_alert_form_validation,
# update the approved_on field on approve of the alert
onapprove = self.cap_alert_approve,
orderby = "cap_info.expires desc",
)
# Components
add_components(tablename,
cap_area = "alert_id",
cap_area_location = {"name": "location",
"joinby": "alert_id",
},
cap_area_tag = {"name": "tag",
"joinby": "alert_id",
},
cap_info = "alert_id",
cap_resource = "alert_id",
)
self.set_method("cap", "alert",
method = "import_feed",
action = CAPImportFeed())
self.set_method("cap", "alert",
method = "assign",
action = self.cap_AssignArea())
if crud_strings["cap_template"]:
crud_strings[tablename] = crud_strings["cap_template"]
else:
ADD_ALERT = T("Create Alert")
crud_strings[tablename] = Storage(
label_create = ADD_ALERT,
title_display = T("Alert Details"),
title_list = T("Alerts"),
# If already-published, this should create a new "Update"
# alert instead of modifying the original
title_update = T("Edit Alert"),
title_upload = T("Import Alerts"),
label_list_button = T("List Alerts"),
label_delete_button = T("Delete Alert"),
msg_record_created = T("Alert created"),
msg_record_modified = T("Alert modified"),
msg_record_deleted = T("Alert deleted"),
msg_list_empty = T("No alerts to show"))
alert_represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ")
alert_id = S3ReusableField("alert_id", "reference %s" % tablename,
comment = T("The alert message containing this information"),
label = T("Alert"),
ondelete = "CASCADE",
represent = alert_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
alert_represent)),
)
# ---------------------------------------------------------------------
# CAP info segments
#
cap_info_responseType_opts = OrderedDict([
("Shelter", T("Shelter - Take shelter in place or per instruction")),
("Evacuate", T("Evacuate - Relocate as instructed in the instruction")),
("Prepare", T("Prepare - Make preparations per the instruction")),
("Execute", T("Execute - Execute a pre-planned activity identified in instruction")),
("Avoid", T("Avoid - Avoid the subject event as per the instruction")),
("Monitor", T("Monitor - Attend to information sources as described in instruction")),
("Assess", T("Assess - Evaluate the information in this message.")),
("AllClear", T("AllClear - The subject event no longer poses a threat")),
("None", T("None - No action recommended")),
])
cap_info_urgency_opts = OrderedDict([
("Immediate", T("Response action should be taken immediately")),
("Expected", T("Response action should be taken soon (within next hour)")),
("Future", T("Responsive action should be taken in the near future")),
("Past", T("Responsive action is no longer required")),
("Unknown", T("Unknown")),
])
cap_info_severity_opts = OrderedDict([
("Extreme", T("Extraordinary threat to life or property")),
("Severe", T("Significant threat to life or property")),
("Moderate", T("Possible threat to life or property")),
("Minor", T("Minimal to no known threat to life or property")),
("Unknown", T("Severity unknown")),
])
cap_info_certainty_opts = OrderedDict([
("Observed", T("Observed: determined to have occurred or to be ongoing")),
("Likely", T("Likely (p > ~50%)")),
("Possible", T("Possible but not likely (p <= ~50%)")),
("Unlikely", T("Not expected to occur (p ~ 0)")),
("Unknown", T("Certainty unknown")),
])
# ---------------------------------------------------------------------
# Warning Priorities for CAP
tablename = "cap_warning_priority"
define_table(tablename,
Field("priority_rank", "integer",
label = T("Priority Rank"),
length = 2,
),
Field("event_code",
label = T("Event Code"),
),
Field("name", notnull=True, length=64,
label = T("Name"),
),
Field("event_type",
label = T("Event Type"),
),
Field("urgency",
label = T("Urgency"),
requires = IS_IN_SET(cap_info_urgency_opts),
),
Field("severity",
label = T("Severity"),
requires = IS_IN_SET(cap_info_severity_opts),
),
Field("certainty",
label = T("Certainty"),
requires = IS_IN_SET(cap_info_certainty_opts),
),
Field("color_code",
label = T("Color Code"),
),
*s3_meta_fields())
priority_represent = S3Represent(lookup=tablename)
crud_strings[tablename] = Storage(
label_create = T("Create Warning Priority"),
title_display = T("Warning Priority Details"),
title_list = T("Warning Priorities"),
title_update = T("Edit Warning Priority"),
title_upload = T("Import Warning Priorities"),
label_list_button = T("List Warning Priorities"),
label_delete_button = T("Delete Warning Priority"),
msg_record_created = T("Warning Priority added"),
msg_record_modified = T("Warning Priority updated"),
msg_record_deleted = T("Warning Priority removed"),
msg_list_empty = T("No Warning Priorities currently registered")
)
configure(tablename,
deduplicate = S3Duplicate(primary=("event_type", "name")),
)
# ---------------------------------------------------------------------
# CAP info priority
# @ToDo: i18n: Need label=T("")
languages = settings.get_cap_languages()
tablename = "cap_info"
define_table(tablename,
alert_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("template_info_id", "reference cap_info",
ondelete = "RESTRICT",
readable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
self.cap_template_represent,
filterby="is_template",
filter_opts=(True,)
)),
widget = S3HiddenWidget(),
),
Field("template_settings", "text",
readable = False,
),
Field("language",
default = "en-US",
represent = lambda opt: languages.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(languages)
),
),
Field("category", "list:string", # 1 or more allowed
represent = S3Represent(options = cap_info_category_opts,
multiple = True,
),
required = True,
requires = IS_IN_SET(cap_info_category_opts,
multiple = True,
),
widget = S3MultiSelectWidget(selectedList = 10),
),
Field("event", "text"),
self.event_type_id(empty = False,
script = '''
$.filterOptionsS3({
'trigger':'event_type_id',
'target':'priority',
'lookupURL':S3.Ap.concat('/cap/priority_get/'),
'lookupResource':'event_type'
})'''
),
Field("response_type", "list:string", # 0 or more allowed
represent = S3Represent(options = cap_info_responseType_opts,
multiple = True,
),
requires = IS_IN_SET(cap_info_responseType_opts,
multiple = True),
widget = S3MultiSelectWidget(selectedList = 10),
),
Field("priority", "reference cap_warning_priority",
represent = priority_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_warning_priority.id",
priority_represent
),
),
),
Field("urgency",
represent = lambda opt: \
cap_info_urgency_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(cap_info_urgency_opts),
),
Field("severity",
represent = lambda opt: \
cap_info_severity_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(cap_info_severity_opts),
),
Field("certainty",
represent = lambda opt: \
cap_info_certainty_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(cap_info_certainty_opts),
),
Field("audience", "text"),
Field("event_code", "text",
default = settings.get_cap_event_codes(),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
s3_datetime("effective",
default = "now",
),
s3_datetime("onset"),
s3_datetime("expires",
past = 0,
default = self.get_expirydate,
),
Field("sender_name"),
Field("headline"),
Field("description", "text"),
Field("instruction", "text"),
Field("contact", "text"),
Field("web",
requires = IS_EMPTY_OR(IS_URL()),
),
Field("parameter", "text",
default = settings.get_cap_parameters(),
label = T("Parameters"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
*s3_meta_fields())
# @ToDo: Move labels into main define_table (can then be lazy & performs better anyway)
info_labels = cap_info_labels()
for field in info_labels:
db.cap_info[field].label = info_labels[field]
if crud_strings["cap_template_info"]:
crud_strings[tablename] = crud_strings["cap_template_info"]
else:
ADD_INFO = T("Add alert information")
crud_strings[tablename] = Storage(
label_create = ADD_INFO,
title_display = T("Alert information"),
title_list = T("Information entries"),
title_update = T("Update alert information"), # this will create a new "Update" alert?
title_upload = T("Import alert information"),
subtitle_list = T("Listing of alert information items"),
label_list_button = T("List information entries"),
label_delete_button = T("Delete Information"),
msg_record_created = T("Alert information created"),
msg_record_modified = T("Alert information modified"),
msg_record_deleted = T("Alert information deleted"),
msg_list_empty = T("No alert information to show"))
info_represent = S3Represent(lookup = tablename,
fields = ["language", "headline"],
field_sep = " - ")
info_id = S3ReusableField("info_id", "reference %s" % tablename,
label = T("Information Segment"),
ondelete = "CASCADE",
represent = info_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
info_represent)
),
sortby = "identifier",
)
configure(tablename,
#create_next = URL(f="info", args=["[id]", "area"]),
onaccept = self.info_onaccept,
)
# Components
add_components(tablename,
cap_resource = "info_id",
cap_area = "info_id",
)
# ---------------------------------------------------------------------
# CAP Resource segments
#
# Resource elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
tablename = "cap_resource"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
self.super_link("doc_id", "doc_entity"),
Field("resource_desc",
requires = IS_NOT_EMPTY(),
),
Field("mime_type",
requires = IS_NOT_EMPTY(),
),
Field("size", "integer",
writable = False,
),
Field("uri",
# needs a special validation
writable = False,
),
#Field("file", "upload"),
Field("deref_uri", "text",
readable = False,
writable = False,
),
Field("digest",
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Resource"),
title_display = T("Alert Resource"),
title_list = T("Resources"),
title_update = T("Edit Resource"),
subtitle_list = T("List Resources"),
label_list_button = T("List Resources"),
label_delete_button = T("Delete Resource"),
msg_record_created = T("Resource added"),
msg_record_modified = T("Resource updated"),
msg_record_deleted = T("Resource deleted"),
msg_list_empty = T("No resources currently defined for this alert"))
# @todo: complete custom form
crud_form = S3SQLCustomForm(#"name",
"info_id",
"resource_desc",
S3SQLInlineComponent("image",
label=T("Image"),
fields=["file",
],
),
S3SQLInlineComponent("document",
label=T("Document"),
fields=["file",
],
),
)
configure(tablename,
super_entity = "doc_entity",
crud_form = crud_form,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# CAP Area segments
#
# Area elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
# Each <area> can have multiple elements which are one of <polygon>,
# <circle>, or <geocode>.
# <polygon> and <circle> are explicit geometry elements.
# <geocode> is a key-value pair in which the key is a standard
# geocoding system like SAME, FIPS, ZIP, and the value is a defined
# value in that system. The region described by the <area> is the
# union of the areas described by the individual elements, but the
# CAP spec advises that, if geocodes are included, the concrete
# geometry elements should outline the area specified by the geocodes,
# as not all recipients will have access to the meanings of the
# geocodes. However, since geocodes are a compact way to describe an
# area, it may be that they will be used without accompanying geometry,
# so we should not count on having <polygon> or <circle>.
#
# Geometry elements are each represented by a gis_location record, and
# linked to the cap_area record via the cap_area_location link table.
# For the moment, <circle> objects are stored with the center in the
# gis_location's lat, lon, and radius (in km) as a tag "radius" and
# value. ToDo: Later, we will add CIRCLESTRING WKT.
#
# Geocode elements are currently stored as key value pairs in the
# cap_area record.
#
# <area> can also specify a minimum altitude and maximum altitude
# ("ceiling"). These are stored in explicit fields for now, but could
# be replaced by key value pairs, if it is found that they are rarely
# used.
#
# (An alternative would be to have cap_area link to a gis_location_group
# record. In that case, the geocode tags could be stored in the
# gis_location_group's overall gis_location element's tags. The altitude
# could be stored in the overall gis_location's elevation, with ceiling
# stored in a tag. We could consider adding a maximum elevation field.)
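# Illustrative sketch (hypothetical data) of how one CAP <area> maps to
# records under this scheme:
#   <area>
#     <areaDesc>Douglas County</areaDesc>
#     <polygon>38.47,-120.14 38.34,-119.95 38.52,-119.74 38.47,-120.14</polygon>
#     <geocode><valueName>SAME</valueName><value>006017</value></geocode>
#   </area>
# => one cap_area record (name="Douglas County"),
#    one gis_location record with
#    WKT "POLYGON ((-120.14 38.47, -119.95 38.34, -119.74 38.52, -120.14 38.47))"
#    linked via cap_area_location, and
#    the geocode stored as a key-value pair (tag="SAME", value="006017")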
tablename = "cap_area"
define_table(tablename,
alert_id(),
info_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("name",
label = T("Area Description"),
required = True,
),
Field("altitude", "integer", # Feet above Sea-level in WGS84 (Specific or Minimum is using a range)
label = T("Altitude"),
),
Field("ceiling", "integer", # Feet above Sea-level in WGS84 (Maximum)
label = T("Ceiling"),
),
# Only used for Templates
self.event_type_id(script = '''
$.filterOptionsS3({
'trigger':'event_type_id',
'target':'priority',
'lookupURL':S3.Ap.concat('/cap/priority_get/'),
'lookupResource':'event_type'
})'''
),
# Only used for Templates
Field("priority",
label = T("Priority"),
represent = priority_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(
db, "cap_warning_priority.id",
priority_represent
),
),
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Area"),
title_display = T("Alert Area"),
title_list = T("Areas"),
title_update = T("Edit Area"),
subtitle_list = T("List Areas"),
label_list_button = T("List Areas"),
label_delete_button = T("Delete Area"),
msg_record_created = T("Area added"),
msg_record_modified = T("Area updated"),
msg_record_deleted = T("Area deleted"),
msg_list_empty = T("No areas currently defined for this alert"))
crud_form = S3SQLCustomForm("alert_id",
"info_id",
"is_template",
"name",
"info_id",
S3SQLInlineComponent("location",
name = "location",
label = "",
multiple = False,
fields = [("", "location_id")],
),
S3SQLInlineComponent("tag",
name = "tag",
label = "",
fields = ["tag",
"value",
],
),
"altitude",
"ceiling",
"event_type_id",
"priority",
)
area_represent = cap_AreaRepresent(show_link=True)
configure(tablename,
#create_next = URL(f="area", args=["[id]", "location"]),
# Old: Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
crud_form = crud_form,
)
# Components
add_components(tablename,
cap_area_location = {"name": "location",
"joinby": "area_id",
},
cap_area_tag = {"name": "tag",
"joinby": "area_id",
},
# Names
cap_area_name = {"name": "name",
"joinby": "area_id",
},
)
area_id = S3ReusableField("area_id", "reference %s" % tablename,
label = T("Area"),
ondelete = "CASCADE",
represent = area_represent,
requires = IS_ONE_OF(db, "cap_area.id",
area_represent),
)
# ToDo: Use a widget tailored to entering <polygon> and <circle>.
# Want to be able to enter them by drawing on the map.
# Also want to allow selecting existing locations that have
# geometry, maybe with some filtering so the list isn't cluttered
# with irrelevant locations.
tablename = "cap_area_location"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
self.gis_location_id(
widget = S3LocationSelector(points = False,
polygons = True,
show_map = True,
catalog_layers = True,
show_address = False,
show_postcode = False,
),
),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Location"),
title_display = T("Alert Location"),
title_list = T("Locations"),
title_update = T("Edit Location"),
subtitle_list = T("List Locations"),
label_list_button = T("List Locations"),
label_delete_button = T("Delete Location"),
msg_record_created = T("Location added"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location deleted"),
msg_list_empty = T("No locations currently defined for this alert"))
configure(tablename,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# Area Tags
# - Key-Value extensions
# - Used to hold for geocodes: key is the geocode system name, and
# value is the specific value for this area.
# - Could store other values here as well, to avoid dedicated fields
# in cap_area for rarely-used items like altitude and ceiling, but
# would have to distinguish those from geocodes.
#
# ToDo: Provide a mechanism for pre-loading geocodes that are not tied
# to individual areas.
# ToDo: Allow sharing the key-value pairs. Cf. Ruby on Rails tagging
# systems such as acts-as-taggable-on, which has a single table of tags
# used by all classes. Each tag record has the class and field that the
# tag belongs to, as well as the tag string. We'd want tag and value,
# but the idea is the same: There would be a table with tag / value
# pairs, and individual cap_area, event_event, org_whatever records
# would link to records in the tag table. So we actually would not have
# duplicate tag value records as we do now.
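# e.g. (hypothetical) a SAME geocode for an area would be stored as one
# cap_area_tag record: tag="SAME", value="006017"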
tablename = "cap_area_tag"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
# ToDo: Allow selecting from a dropdown list of pre-defined
# geocode system names.
Field("tag",
label = T("Geocode Name"),
),
# ToDo: Once the geocode system is selected, fetch a list
# of current values for that geocode system. Allow adding
# new values, e.g. with combo box menu.
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
configure(tablename,
create_onaccept = update_alert_id(tablename),
# deduplicate = self.cap_area_tag_deduplicate,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(cap_alert_id = alert_id,
cap_alert_represent = alert_represent,
cap_area_id = area_id,
cap_area_represent = area_represent,
cap_info_represent = info_represent,
cap_info_category_opts = cap_info_category_opts,
cap_template_represent = self.cap_template_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def generate_identifier():
"""
Generate an identifier for a new form
"""
db = current.db
table = db.cap_alert
r = db().select(table.id,
limitby=(0, 1),
orderby=~table.id).first()
_time = datetime.datetime.strftime(datetime.datetime.utcnow(), "%Y%m%d")
if r:
next_id = int(r.id) + 1
else:
next_id = 1
# Format: prefix-time+-timezone+sequence-suffix
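# e.g. with prefix="sahana", oid="2.49.0.1.104" and no suffix, this yields
# (hypothetically) "sahana-2.49.0.1.104-20141023-001"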
settings = current.deployment_settings
prefix = settings.get_cap_identifier_prefix() or current.xml.domain
oid = settings.get_cap_identifier_oid()
suffix = settings.get_cap_identifier_suffix()
return "%s-%s-%s-%03d%s%s" % \
(prefix, oid, _time, next_id, ["", "-"][bool(suffix)], suffix)
# -------------------------------------------------------------------------
@staticmethod
def generate_sender():
"""
Generate a sender for a new form
"""
try:
user_id = current.auth.user.id
except AttributeError:
return ""
return "%s/%d" % (current.xml.domain, user_id)
# -------------------------------------------------------------------------
@staticmethod
def generate_source():
"""
Generate a source for CAP alert
"""
return "%s@%s" % (current.xml.domain,
current.deployment_settings.get_base_public_url())
# -------------------------------------------------------------------------
@staticmethod
def get_expirydate():
"""
Default Expiry date based on the expire offset
"""
return current.request.utcnow + \
datetime.timedelta(days = current.deployment_settings.\
get_cap_expire_offset())
# -------------------------------------------------------------------------
@staticmethod
def cap_template_represent(id, row=None):
"""
Represent an alert template concisely
"""
if row:
id = row.id
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.cap_alert
row = db(table.id == id).select(table.is_template,
table.template_title,
# left = table.on(table.id == table.parent_item_category_id), Doesn't work
limitby=(0, 1)).first()
try:
# @ToDo: Should get headline from "info"?
if row.is_template:
return row.template_title
else:
return current.s3db.cap_alert_represent(id)
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def list_string_represent(string, fmt=lambda v: v):
try:
if isinstance(string, list):
return ", ".join([fmt(i) for i in string])
elif isinstance(string, basestring):
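# web2py serializes list:string values as "|val1|val2|" -
# strip the enclosing bars, then split on the separator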
return ", ".join([fmt(i) for i in string[1:-1].split("|")])
except IndexError:
return current.messages.UNKNOWN_OPT
return ""
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_form_validation(form):
"""
On Validation for CAP alert form
"""
form_vars = form.vars
if form_vars.get("scope") == "Private" and not form_vars.get("addresses"):
form.errors["addresses"] = \
current.T("'Recipients' field mandatory in case of 'Private' scope")
return
# -------------------------------------------------------------------------
@staticmethod
def info_onaccept(form):
"""
After DB I/O
"""
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
info_id = form_vars.id
if not info_id:
return
db = current.db
atable = db.cap_alert
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
itable.event,
itable.event_type_id,
limitby=(0, 1)).first()
if info:
alert_id = info.alert_id
set_ = db(itable.id == info_id)
if alert_id and cap_alert_is_template(alert_id):
set_.update(is_template = True)
if not info.event:
set_.update(event = current.db.cap_info.event_type_id.\
represent(info.event_type_id))
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_approve(record=None):
"""
Update the approved_on field when alert gets approved
"""
if not record:
return
alert_id = record["id"]
# Update approved_on at the time the alert is approved
if alert_id:
db = current.db
db(db.cap_alert.id == alert_id).update(approved_on = current.request.utcnow)
# =============================================================================
class S3CAPAreaNameModel(S3Model):
"""
CAP Name Model:
- local names for CAP Area
"""
names = ("cap_area_name",
)
def model(self):
T = current.T
l10n_languages = current.deployment_settings.get_L10n_languages()
# ---------------------------------------------------------------------
# Local Names
#
tablename = "cap_area_name"
self.define_table(tablename,
self.cap_area_id(empty = False,
ondelete = "CASCADE",
),
Field("language",
label = T("Language"),
represent = lambda opt: l10n_languages.get(opt,
current.messages.UNKNOWN_OPT),
requires = IS_ISO639_2_LANGUAGE_CODE(),
),
Field("name_l10n",
label = T("Local Name"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary=("area_id", "language")),
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
def cap_info_labels():
"""
Labels for CAP info segments
"""
T = current.T
return dict(language=T("Language"),
category=T("Category"),
event=T("Event"),
response_type=T("Response type"),
urgency=T("Urgency"),
severity=T("Severity"),
certainty=T("Certainty"),
audience=T("Audience"),
event_code=T("Event code"),
effective=T("Effective"),
onset=T("Onset"),
expires=T("Expires at"),
sender_name=T("Sender's name"),
headline=T("Headline"),
description=T("Description"),
instruction=T("Instruction"),
web=T("URL"),
contact=T("Contact information"),
parameter=T("Parameters")
)
# =============================================================================
def cap_alert_is_template(alert_id):
"""
Tell whether an alert entry is a template
"""
if not alert_id:
return False
table = current.s3db.cap_alert
query = (table.id == alert_id)
r = current.db(query).select(table.is_template,
limitby=(0, 1)).first()
return r and r.is_template
# =============================================================================
def cap_rheader(r):
""" Resource Header for CAP module """
rheader = None
if r.representation == "html":
record = r.record
if record:
T = current.T
s3db = current.s3db
tablename = r.tablename
if tablename == "cap_alert":
alert_id = record.id
itable = s3db.cap_info
row = current.db(itable.alert_id == alert_id).\
select(itable.id,
limitby=(0, 1)).first()
if record.is_template:
if not (row and row.id):
error = DIV(T("An alert needs to contain at least one info item."),
_class="error")
else:
error = ""
tabs = [(T("Template"), None),
(T("Information template"), "info"),
#(T("Area"), "area"),
#(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(s3db.cap_template_represent(alert_id, record),
_href=URL(c="cap", f="template",
args=[alert_id, "update"]))),
),
),
rheader_tabs,
error
)
else:
if not (row and row.id):
error = DIV(T("You need to create at least one alert information item in order to be able to broadcast this alert!"),
_class="error")
export_btn = ""
submit_btn = None
else:
error = ""
export_btn = A(DIV(_class="export_cap_large"),
_href=URL(c="cap", f="alert", args=["%s.cap" % alert_id]),
_target="_blank",
)
# Display 'Submit for Approval' based on permission
# and deployment settings
if not r.record.approved_by and \
current.deployment_settings.get_cap_authorisation() and \
current.auth.s3_has_permission("update", "cap_alert",
record_id=alert_id):
# Get the user ids for the role alert_approver
db = current.db
agtable = db.auth_group
group_rows = db(agtable.role == "Alert Approver").\
select(agtable.id)
if group_rows:
group_members = current.auth.s3_group_members
user_pe_id = current.auth.s3_user_pe_id
for group_row in group_rows:
group_id = group_row.id
user_ids = group_members(group_id) # List of user_ids
pe_ids = [] # List of pe_ids
pe_append = pe_ids.append
for user_id in user_ids:
pe_append(user_pe_id(int(user_id)))
submit_btn = A(T("Submit for Approval"),
_href = URL(f = "compose",
vars = {"cap_alert.id": record.id,
"pe_ids": pe_ids,
},
),
_class = "action-btn"
)
else:
submit_btn = None
else:
submit_btn = None
tabs = [(T("Alert Details"), None),
(T("Information"), "info"),
(T("Area"), "area"),
(T("Resource Files"), "resource"),
]
if r.representation == "html" and \
current.auth.s3_has_permission("update", "cap_alert",
record_id=alert_id):
# Check to see if 'Predefined Areas' tab need to be added
artable = s3db.cap_area
query = (artable.is_template == True) & \
(artable.deleted == False)
template_area_rows = current.db(query).select(artable.id,
limitby=(0, 1))
if template_area_rows:
tabs.insert(2, (T("Predefined Areas"), "assign"))
# Display "Copy" Button to copy record from the opened info
if r.component_name == "info" and \
r.component_id:
copy_btn = A(T("Copy"),
_href = URL(f = "alert",
args = [r.id, "info", "create",],
vars = {"from_record": r.component_id,
},
),
_class = "action-btn"
)
else:
copy_btn = None
else:
copy_btn = None
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(alert_id, record),
_href=URL(c="cap", f="alert",
args=[alert_id, "update"]))),
),
TR(export_btn)
),
rheader_tabs,
error
)
if copy_btn:
rheader.insert(1, TR(TD(copy_btn)))
if submit_btn:
rheader.insert(1, TR(TD(submit_btn)))
elif tablename == "cap_area":
# Used only for Area Templates
tabs = [(T("Area"), None),
]
if current.deployment_settings.get_L10n_translate_cap_area():
tabs.insert(1, (T("Local Names"), "name"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.alert_id, "update"])))
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.info_id),
_href=URL(c="cap", f="info",
args=[record.info_id, "update"]))),
),
TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.id, record),
_href=URL(c="cap", f="area",
args=[record.id, "update"]))),
),
),
rheader_tabs
)
elif tablename == "cap_info":
# Shouldn't ever be called
tabs = [(T("Information"), None),
(T("Resource Files"), "resource"),
]
if cap_alert_is_template(record.alert_id):
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(s3db.cap_template_represent(record.alert_id),
_href=URL(c="cap", f="template",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Info template")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs,
_class="cap_info_template_form"
)
current.response.s3.js_global.append('''i18n.cap_locked="%s"''' % T("Locked"))
else:
tabs.insert(1, (T("Areas"), "area"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs
)
return rheader
# =============================================================================
def update_alert_id(tablename):
""" On-accept for area and resource records """
def func(form):
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
if form_vars.get("alert_id", None):
# Nothing to do
return
# Look up from the info/area
_id = form_vars.id
if not _id:
return
db = current.db
table = db[tablename]
if tablename == "cap_area_location" or tablename == "cap_area_tag":
area_id = form_vars.get("area_id", None)
if not area_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.area_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
area_id = item.area_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
atable = db.cap_area
area = db(atable.id == area_id).select(atable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = area.alert_id
except:
# Nothing we can do
return
else:
# cap_area or cap_resource
info_id = form_vars.get("info_id", None)
if not info_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.info_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
info_id = item.info_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = info.alert_id
except:
# Nothing we can do
return
if alert_id:
db(table.id == _id).update(alert_id = alert_id)
return func
# =============================================================================
def cap_gis_location_xml_post_parse(element, record):
"""
UNUSED - done in XSLT
Convert CAP polygon representation to WKT; extract circle lat lon.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Extract altitude and ceiling from the enclosing <area>, and
# compute an elevation value to apply to all enclosed gis_locations.
cap_polygons = element.xpath("cap_polygon")
if cap_polygons:
cap_polygon_text = cap_polygons[0].text
# CAP polygons and WKT have opposite separator conventions:
# CAP has spaces between coordinate pairs and within pairs the
# coordinates are separated by comma, and vice versa for WKT.
# Unfortunately, CAP and WKT (as we use it) also have opposite
# orders of lat and lon. CAP has lat lon, WKT has lon lat.
# Both close the polygon by repeating the first point.
cap_points_text = cap_polygon_text.split()
cap_points = [cpoint.split(",") for cpoint in cap_points_text]
# @ToDo: Should we try interpreting all the points as decimal numbers,
# and failing validation if they're wrong?
wkt_points = ["%s %s" % (cpoint[1], cpoint[0]) for cpoint in cap_points]
wkt_polygon_text = "POLYGON ((%s))" % ", ".join(wkt_points)
record.wkt = wkt_polygon_text
return
cap_circle_values = element.xpath("resource[@name='gis_location_tag']/data[@field='tag' and text()='cap_circle']/../data[@field='value']")
if cap_circle_values:
cap_circle_text = cap_circle_values[0].text
coords, radius = cap_circle_text.split()
lat, lon = coords.split(",")
try:
# If any of these fail to interpret as numbers, the circle was
# badly formatted. For now, we don't try to fail validation,
# but just don't set the lat, lon.
lat = float(lat)
lon = float(lon)
radius = float(radius)
except ValueError:
return
record.lat = lat
record.lon = lon
# Add a bounding box for the given radius, if it is not zero.
if radius > 0.0:
bbox = current.gis.get_bounds_from_radius(lat, lon, radius)
record.lat_min = bbox["lat_min"]
record.lon_min = bbox["lon_min"]
record.lat_max = bbox["lat_max"]
record.lon_max = bbox["lon_max"]
# =============================================================================
def cap_gis_location_xml_post_render(element, record):
"""
UNUSED - done in XSLT
Convert Eden WKT polygon (and eventually circle) representation to
CAP format and provide them in the rendered s3xml.
Not all internal formats have a parallel in CAP, but an effort is made
to provide a reasonable substitute:
Polygons are supported.
Circles that were read in from CAP (and thus carry the original CAP
circle data) are supported.
Multipolygons are currently rendered as their bounding box.
Points are rendered as zero radius circles.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Can we rely on gis_feature_type == 3 to tell if the location is a
# polygon, or is it better to look for POLYGON in the wkt? For now, check
# both.
# @ToDo: CAP does not support multipolygons. Do we want to extract their
# outer polygon if passed MULTIPOLYGON wkt? For now, these are exported
# with their bounding box as the polygon.
# @ToDo: What if a point (gis_feature_type == 1) that is not a CAP circle
# has a non-point bounding box? Should it be rendered as a polygon for
# the bounding box?
try:
from lxml import etree
except:
# This won't fail, since we're in the middle of processing xml.
return
SubElement = etree.SubElement
s3xml = current.xml
TAG = s3xml.TAG
RESOURCE = TAG["resource"]
DATA = TAG["data"]
ATTRIBUTE = s3xml.ATTRIBUTE
NAME = ATTRIBUTE["name"]
FIELD = ATTRIBUTE["field"]
VALUE = ATTRIBUTE["value"]
loc_tablename = "gis_location"
tag_tablename = "gis_location_tag"
tag_fieldname = "tag"
val_fieldname = "value"
polygon_tag = "cap_polygon"
circle_tag = "cap_circle"
fallback_polygon_tag = "cap_polygon_fallback"
fallback_circle_tag = "cap_circle_fallback"
def __cap_gis_location_add_polygon(element, cap_polygon_text, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds the CAP polygon
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_polygon_tag
else:
tag_field.text = polygon_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
val_field.text = cap_polygon_text
def __cap_gis_location_add_circle(element, lat, lon, radius, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds CAP circle
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_circle_tag
else:
tag_field.text = circle_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
# Construct a CAP circle string: latitude,longitude radius
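# e.g. lat=32.9525, lon=-115.5527, radius=10 -> "32.9525,-115.5527 10"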
cap_circle_text = "%s,%s %s" % (lat, lon, radius)
val_field.text = cap_circle_text
# Sort out the geometry case by wkt, CAP tags, gis_feature_type, bounds,...
# Check the two cases for CAP-specific locations first, as those will have
# definite export values. For others, we'll attempt to produce either a
# circle or polygon: Locations with a bounding box will get a box polygon,
# points will get a zero-radius circle.
# Currently wkt is stripped out of gis_location records right here:
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1332
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1426
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L3152
# Until we provide a way to configure that choice, this will not work for
# polygons.
wkt = record.get("wkt", None)
# WKT POLYGON: Although there is no WKT spec, according to every reference
# that deals with nested polygons, the outer, enclosing, polygon must be
# listed first. Hence, we extract only the first polygon, as CAP has no
# provision for nesting.
if wkt and wkt.startswith("POLYGON"):
# ToDo: Is it sufficient to test for adjacent (( to find the start of
# the polygon, or might there be whitespace between them?
start = wkt.find("((")
end = wkt.find(")")
if start >= 0 and end >= 0:
polygon_text = wkt[start + 2 : end]
points_text = polygon_text.split(",")
points = [p.split() for p in points_text]
cap_points_text = ["%s,%s" % (point[1], point[0]) for point in points]
cap_polygon_text = " ".join(cap_points_text)
__cap_gis_location_add_polygon(element, cap_polygon_text)
return
# Fall through if the wkt string was mal-formed.
# CAP circle stored in a gis_location_tag with tag = cap_circle.
# If there is a cap_circle tag, we don't need to do anything further, as
# export.xsl will use it. However, we don't know if there is a cap_circle
# tag...
#
# @ToDo: The export calls xml_post_render after processing a resource's
# fields, but before its components are added as children in the xml tree.
# If this were delayed til after the components were added, we could look
# there for the cap_circle gis_location_tag record. Since xml_post_parse
# isn't in use yet (except for this), maybe we could look at moving it til
# after the components?
#
# For now, with the xml_post_render before components: We could do a db
# query to check for a real cap_circle tag record, and not bother with
# creating fallbacks from bounding box or point...but we don't have to.
# Instead, just go ahead and add the fallbacks under different tag names,
# and let the export.xsl sort them out. This only wastes a little time
# compared to a db query.
# ToDo: MULTIPOLYGON -- Can stitch together the outer polygons in the
# multipolygon, but would need to assure all were the same handedness.
# The remaining cases are for locations that don't have either polygon wkt
# or a cap_circle tag.
# Bounding box: Make a four-vertex polygon from the bounding box.
# This is a fallback, as if there is a circle tag, we'll use that.
lon_min = record.get("lon_min", None)
lon_max = record.get("lon_max", None)
lat_min = record.get("lat_min", None)
lat_max = record.get("lat_max", None)
if lon_min and lon_max and lat_min and lat_max and \
(lon_min != lon_max) and (lat_min != lat_max):
# Although there is no WKT requirement, arrange the points in
# counterclockwise order. Recall format is:
# lat1,lon1 lat2,lon2 ... latN,lonN lat1,lon1
cap_polygon_text = \
"%(lat_min)s,%(lon_min)s %(lat_min)s,%(lon_max)s %(lat_max)s,%(lon_max)s %(lat_max)s,%(lon_min)s %(lat_min)s,%(lon_min)s" \
% {"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": lat_min,
"lat_max": lat_max}
__cap_gis_location_add_polygon(element, cap_polygon_text, fallback=True)
return
# WKT POINT or location with lat, lon: This can be rendered as a
# zero-radius circle.
# Q: Do we put bounding boxes around POINT locations, and are they
# meaningful?
lat = record.get("lat", None)
lon = record.get("lon", None)
if not lat or not lon:
# Look for POINT.
if wkt and wkt.startswith("POINT"):
start = wkt.find("(")
end = wkt.find(")")
if start >= 0 and end >= 0:
point_text = wkt[start + 1 : end]
point = point_text.split()
try:
lon = float(point[0])
lat = float(point[1])
except ValueError:
pass
if lat and lon:
# Add a (fallback) circle with zero radius.
__cap_gis_location_add_circle(element, lat, lon, 0, True)
return
# ToDo: Other WKT.
# Did not find anything to use. Presumably the area has a text description.
return
# =============================================================================
def cap_alert_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for CAP Alerts on the Home page.
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["cap_alert.id"]
item_class = "thumbnail"
T = current.T
#raw = record._row
# @ToDo: handle the case where we have multiple info segments &/or areas
headline = record["cap_info.headline"]
location = record["cap_area.name"]
priority = record["cap_info.priority"]
status = record["cap_alert.status"]
scope = record["cap_alert.scope"]
event = record["cap_info.event_type_id"]
if current.auth.s3_logged_in():
_href = URL(c="cap", f="alert", args=[record_id, "profile"])
else:
_href = URL(c="cap", f="public", args=[record_id, "profile"])
headline = A(headline,
_href = _href,
_target = "_blank",
)
if list_id == "map_popup":
itable = current.s3db.cap_info
# Map popup
event = itable.event_type_id.represent(event)
if priority is None:
priority = T("Unknown")
else:
priority = itable.priority.represent(priority)
description = record["cap_info.description"]
response_type = record["cap_info.response_type"]
sender = record["cap_info.sender_name"]
last = TAG[""](BR(),
description,
BR(),
", ".join(response_type),
BR(),
sender,
BR(),
)
else:
if priority == current.messages["NONE"]:
priority = T("Unknown")
last = BR()
details = "%s %s %s" % (priority, status, scope)
more = A(T("Full Alert"),
_href = _href,
_target = "_blank",
)
item = DIV(headline,
BR(),
location,
BR(),
details,
BR(),
event,
last,
more,
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def add_area_from_template(area_id, alert_id):
"""
Add an area from a Template along with its components Location and Tag
"""
afieldnames = ("name",
"altitude",
"ceiling",
)
lfieldnames = ("location_id",
)
tfieldnames = ("tag",
"value",
"comments",
)
db = current.db
s3db = current.s3db
atable = s3db.cap_area
ltable = s3db.cap_area_location
ttable = s3db.cap_area_tag
# Create Area Record from Template
atemplate = db(atable.id == area_id).select(limitby=(0, 1),
*afieldnames).first()
adata = {"is_template": False,
"alert_id": alert_id
}
for field in afieldnames:
adata[field] = atemplate[field]
aid = atable.insert(**adata)
# Add Area Location Components of Template
ltemplate = db(ltable.area_id == area_id).select(*lfieldnames)
for row in ltemplate:
ldata = {"area_id": aid,
"alert_id": alert_id}
for field in lfieldnames:
ldata[field] = row[field]
ltable.insert(**ldata)
# Add Area Tag Components of Template
ttemplate = db(ttable.area_id == area_id).select(*tfieldnames)
for row in ttemplate:
tdata = {"area_id": aid}
for field in tfieldnames:
tdata[field] = row[field]
ttable.insert(**tdata)
return aid
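# Usage sketch (hypothetical IDs):
#   new_area_id = add_area_from_template(template_area_id, alert_id)
# copies the template's cap_area record plus its location and tag
# components onto the given alert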
# =============================================================================
class CAPImportFeed(S3Method):
"""
Import CAP alerts from a URL
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
request = current.request
response = current.response
title = T("Import from Feed URL")
# @ToDo: use Formstyle
form = FORM(
TABLE(
TR(TD(DIV(B("%s:" % T("URL")),
SPAN(" *", _class="req"))),
TD(INPUT(_type="text", _name="url",
_id="url", _value="")),
TD(),
),
TR(TD(B("%s: " % T("User"))),
TD(INPUT(_type="text", _name="user",
_id="user", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Password"))),
TD(INPUT(_type="text", _name="password",
_id="password", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Ignore Errors?"))),
TD(INPUT(_type="checkbox", _name="ignore_errors",
_id="ignore_errors")),
TD(),
),
TR(TD(),
TD(INPUT(_type="submit", _value=T("Import"))),
TD(),
)
)
)
response.view = "create.html"
output = dict(title=title,
form=form)
if form.accepts(request.vars, current.session):
form_vars = form.vars
url = form_vars.get("url", None)
if not url:
response.error = T("URL is required")
return output
# @ToDo:
username = form_vars.get("username", None)
password = form_vars.get("password", None)
try:
file = fetch(url)
except urllib2.URLError:
# Note: urllib2.HTTPError is a subclass of URLError,
# so this handles both error types
response.error = str(sys.exc_info()[1])
return output
File = StringIO(file)
stylesheet = os.path.join(request.folder, "static", "formats",
"cap", "import.xsl")
xml = current.xml
tree = xml.parse(File)
resource = current.s3db.resource("cap_alert")
s3xml = xml.transform(tree, stylesheet_path=stylesheet,
name=resource.name)
try:
resource.import_xml(s3xml,
ignore_errors=form_vars.get("ignore_errors", None))
except:
response.error = str(sys.exc_info()[1])
else:
import_count = resource.import_count
if import_count:
response.confirmation = "%s %s" % \
(import_count,
T("Alerts successfully imported."))
else:
response.information = T("No Alerts available.")
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
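# This method is registered above via
#   set_method("cap", "alert", method="import_feed", action=CAPImportFeed())
# so it is exposed (hypothetically) at .../cap/alert/import_feed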
# -----------------------------------------------------------------------------
class cap_AssignArea(S3Method):
"""
Assign CAP area(s) to an alert; allows (multi-)selection of predefined areas
"""
def apply_method(self, r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if not r.record:
# Must be called for a particular alert
r.error(404, current.ERROR.BAD_RECORD)
# The record ID of the alert the method is called for
alert_id = r.id
# Requires permission to update this alert
authorised = current.auth.s3_has_permission("update", "cap_alert",
record_id=alert_id)
if not authorised:
r.unauthorised()
T = current.T
s3db = current.s3db
response = current.response
# Filter to limit the selection of areas
area_filter = (FS("is_template") == True)
if r.http == "POST":
# Template areas have been selected
added = 0
post_vars = r.post_vars
if all([n in post_vars for n in ("assign", "selected", "mode")]):
selected = post_vars.selected
if selected:
selected = selected.split(",")
else:
selected = []
# Handle exclusion filter
if post_vars.mode == "Exclusive":
# URL filters
if "filterURL" in post_vars:
filters = S3URLQuery.parse_url(post_vars.ajaxURL)
else:
filters = None
query = area_filter & (~(FS("id").belongs(selected)))
aresource = s3db.resource("cap_area",
filter = query,
vars = filters)
rows = aresource.select(["id"], as_rows=True)
selected = [str(row.id) for row in rows]
for area_id in selected:
area_id = int(area_id.strip())
add_area_from_template(area_id, alert_id)
added += 1
current.session.confirmation = T("%(number)s assigned") % \
{"number": added}
if added > 0:
# Redirect to the list of areas of this alert
redirect(URL(args=[r.id, "area"], vars={}))
else:
# Return to the "assign" page
redirect(URL(args=r.args, vars={}))
elif r.http == "GET":
# Filter widgets (@todo: lookup from cap_area resource config?)
filter_widgets = []
# List fields
list_fields = ["id",
"name",
"event_type_id",
"priority",
]
# Data table
aresource = s3db.resource("cap_area", filter=area_filter)
totalrows = aresource.count()
get_vars = r.get_vars
if "pageLength" in get_vars:
display_length = get_vars["pageLength"]
if display_length == "None":
display_length = None
else:
display_length = int(display_length)
else:
display_length = 25
if display_length:
limit = 4 * display_length
else:
limit = None
# Datatable filter and sorting
query, orderby, left = aresource.datatable_filter(list_fields,
get_vars,
)
aresource.add_filter(query)
# Extract the data
data = aresource.select(list_fields,
start = 0,
limit = limit,
orderby = orderby,
left = left,
count = True,
represent = True,
)
filteredrows = data.numrows
# Instantiate the datatable
dt = S3DataTable(data.rfields, data.rows)
dt_id = "datatable"
# Bulk actions
dt_bulk_actions = [(T("Assign"), "assign")]
if r.representation == "html":
# Page load
# Disallow deletion from this table, and link all open-buttons
# to the respective area read page
aresource.configure(deletable = False)
profile_url = URL(c = "cap",
f = "area",
args = ["[id]", "read"],
)
S3CRUD.action_buttons(r,
deletable = False,
read_url = profile_url,
update_url = profile_url,
)
# Hide export icons
response.s3.no_formats = True
# Render the datatable (will be "items" in the output dict)
items = dt.html(totalrows,
filteredrows,
dt_id,
dt_ajax_url = URL(args = r.args,
extension="aadata",
vars={},
),
dt_bulk_actions = dt_bulk_actions,
dt_pageLength = display_length,
dt_pagination = "true",
dt_searching = "false",
)
# Filter form
if filter_widgets:
# Where to retrieve filtered data from:
get_vars = aresource.crud._remove_filters(r.get_vars)
filter_submit_url = r.url(vars=get_vars)
# Where to retrieve updated filter options from:
filter_ajax_url = URL(f="cap_area",
args=["filter.options"],
vars={},
)
get_config = aresource.get_config
filter_clear = get_config("filter_clear", True)
filter_formstyle = get_config("filter_formstyle", None)
filter_submit = get_config("filter_submit", True)
filter_form = S3FilterForm(filter_widgets,
clear = filter_clear,
formstyle = filter_formstyle,
submit = filter_submit,
ajax = True,
url = filter_submit_url,
ajaxurl = filter_ajax_url,
_class = "filter-form",
_id = "datatable-filter-form",
)
fresource = s3db.resource("cap_area")
ff = filter_form.html(fresource,
r.get_vars,
target = "datatable",
)
else:
ff = ""
output = {"items": items, # the datatable
"title": T("Add Areas"),
"list_filter_form": ff,
}
response.view = "list_filter.html"
return output
elif r.representation == "aadata":
# Ajax refresh
if "draw" in get_vars:
echo = int(get_vars.draw)
else:
echo = None
items = dt.json(totalrows,
filteredrows,
dt_id,
echo,
dt_bulk_actions=dt_bulk_actions,
)
response.headers["Content-Type"] = "application/json"
return items
else:
r.error(501, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
# -----------------------------------------------------------------------------
class cap_AreaRepresent(S3Represent):
""" Representation of CAP Area """
def __init__(self,
show_link=False,
multiple=False):
settings = current.deployment_settings
# Translation using cap_area_name & not T()
translate = settings.get_L10n_translate_cap_area()
if translate:
language = current.session.s3.language
if language == settings.get_L10n_default_language():
translate = False
super(cap_AreaRepresent,
self).__init__(lookup="cap_area",
show_link=show_link,
translate=translate,
multiple=multiple
)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
            Custom lookup method for Area (CAP) rows. Parameters
            key and fields are not used, but are kept for API
            compatibility reasons.
@param values: the cap_area IDs
"""
db = current.db
s3db = current.s3db
artable = s3db.cap_area
count = len(values)
if count == 1:
query = (artable.id == values[0])
else:
query = (artable.id.belongs(values))
fields = [artable.id,
artable.name,
]
if self.translate:
ltable = s3db.cap_area_name
fields += [ltable.name_l10n,
]
left = [ltable.on((ltable.area_id == artable.id) & \
(ltable.language == current.session.s3.language)),
]
else:
left = None
rows = current.db(query).select(left = left,
limitby = (0, count),
*fields)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the cap_area Row
"""
if self.translate:
name = row["cap_area_name.name_l10n"] or row["cap_area.name"]
else:
name = row["cap_area.name"]
if not name:
return self.default
return s3_unicode(name)
# END =========================================================================
| mit | 8,098,246,358,597,634,000 | 43.661408 | 142 | 0.429789 | false |
holtjma/msbwt | MUS/util.py | 1 | 4898 | '''
Created on Nov 1, 2013
@summary: this file mostly contains some auxiliary checks for the command line interface to make sure it's
handed correct file types
@author: holtjma
'''
import argparse as ap
import glob
import gzip
import os
#I see no need for the versions to be different as of now
DESC = "A multi-string BWT package for DNA and RNA."
VERSION = '0.3.0'
PKG_VERSION = VERSION
validCharacters = set(['$', 'A', 'C', 'G', 'N', 'T'])
def readableFastqFile(fileName):
'''
@param filename - must be both an existing and readable fastq file, supported under '.txt' and '.gz' as of now
'''
if os.path.isfile(fileName) and os.access(fileName, os.R_OK):
if fileName.endswith('.txt') or fileName.endswith('.gz') or fileName.endswith('.fastq') or fileName.endswith('.fq'):
return fileName
else:
raise ap.ArgumentTypeError("Wrong file format ('.txt', '.gz', '.fastq', or '.fq' required): '%s'" % fileName)
else:
raise ap.ArgumentTypeError("Cannot read file '%s'." % fileName)
'''
TODO: REMOVE UNUSED FUNCTION
'''
def readableNpyFile(fileName):
if os.path.isfile(fileName) and os.access(fileName, os.R_OK):
if fileName.endswith('.npy'):
return fileName
else:
raise ap.ArgumentTypeError("Wrong file format ('.npy' required): '%s'" % fileName)
else:
raise ap.ArgumentTypeError("Cannot read file '%s'." % fileName)
'''
TODO: REMOVE UNUSED FUNCTION
'''
def writableNpyFile(fileName):
if os.access(os.path.dirname(fileName), os.W_OK):
if fileName.endswith('.npy'):
return fileName
else:
raise ap.ArgumentTypeError("Wrong file format ('.npy' required): '%s'." % fileName)
else:
raise ap.ArgumentTypeError("Cannot write file '%s'." % fileName)
def newDirectory(dirName):
'''
@param dirName - will make a directory with this name, aka, this must be a new directory
'''
#strip any tail '/'
if dirName[-1] == '/':
dirName = dirName[0:-1]
if os.path.exists(dirName):
if len(glob.glob(dirName+'/*')) != 0:
raise ap.ArgumentTypeError("Non-empty directory already exists: '%s'" % dirName)
else:
#this can raise it's own exception
os.makedirs(dirName)
return dirName
def existingDirectory(dirName):
'''
@param dirName - checks to make sure this directory already exists
TODO: add checks for the bwt files?
'''
#strip any tail '/'
if dirName[-1] == '/':
dirName = dirName[0:-1]
if os.path.isdir(dirName):
return dirName
else:
raise ap.ArgumentTypeError("Directory does not exist: '%s'" % dirName)
def newOrExistingDirectory(dirName):
'''
@param dirName - the directory could be pre-existing, if not it's created
'''
if dirName[-1] == '/':
dirName = dirName[0:-1]
if os.path.isdir(dirName):
return dirName
    elif os.path.exists(dirName):
        raise ap.ArgumentTypeError("'%s' exists but is not a directory" % dirName)
else:
os.makedirs(dirName)
return dirName
def validKmer(kmer):
'''
@param kmer - must be contained in the characters used for our sequencing
'''
for c in kmer:
if not (c in validCharacters):
raise ap.ArgumentTypeError("Invalid k-mer: All characters must be in ($, A, C, G, N, T)")
return kmer
def fastaIterator(fastaFN):
'''
Iterator that yields tuples containing a sequence label and the sequence itself
@param fastaFN - the FASTA filename to open and parse
@return - an iterator yielding tuples of the form (label, sequence) from the FASTA file
'''
if fastaFN[len(fastaFN)-3:] == '.gz':
fp = gzip.open(fastaFN, 'r')
else:
fp = open(fastaFN, 'r')
label = ''
segments = []
line = ''
for line in fp:
if line[0] == '>':
if label != '':
yield (label, ''.join(segments))
label = (line.strip('\n')[1:]).split(' ')[0]
segments = []
else:
segments.append(line.strip('\n'))
if label != '' and len(segments) > 0:
yield (label, ''.join(segments))
fp.close()
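# Illustrative usage (the file name is hypothetical, not part of this module):
#
#     for label, seq in fastaIterator('reads.fa.gz'):
#         print label, len(seq)
#
# Each yielded tuple pairs the first whitespace-delimited token of the '>'
# header line with the full concatenated sequence for that record.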
def fastqIterator(fastqFN):
if fastqFN[len(fastqFN)-3:] == '.gz':
fp = gzip.open(fastqFN, 'r')
else:
fp = open(fastqFN, 'r')
l1 = ''
seq = ''
l2 = ''
quals = ''
i = 0
for line in fp:
if i & 0x3 == 0:
l1 = line.strip('\n')
elif i & 0x3 == 1:
seq = line.strip('\n')
elif i & 0x3 == 2:
l2 = line.strip('\n')
else:
quals = line.strip('\n')
yield (l1, seq, l2, quals)
l1 = ''
seq = ''
l2 = ''
quals = ''
i += 1
fp.close()
| mit | -1,118,803,294,601,152,300 | 28.506024 | 124 | 0.569212 | false |
giuseppe/virt-manager | virtManager/uihelpers.py | 1 | 44094 | #
# Copyright (C) 2009, 2013, 2014 Red Hat, Inc.
# Copyright (C) 2009 Cole Robinson <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import logging
import os
import statvfs
import pwd
# pylint: disable=E0611
from gi.repository import GObject
from gi.repository import Gtk
# pylint: enable=E0611
import libvirt
import virtinst
from virtManager import config
OPTICAL_DEV_PATH = 0
OPTICAL_LABEL = 1
OPTICAL_IS_MEDIA_PRESENT = 2
OPTICAL_DEV_KEY = 3
OPTICAL_MEDIA_KEY = 4
OPTICAL_IS_VALID = 5
try:
import gi
gi.check_version("3.7.4")
can_set_row_none = True
except (ValueError, AttributeError):
can_set_row_none = False
vm_status_icons = {
libvirt.VIR_DOMAIN_BLOCKED: "state_running",
libvirt.VIR_DOMAIN_CRASHED: "state_shutoff",
libvirt.VIR_DOMAIN_PAUSED: "state_paused",
libvirt.VIR_DOMAIN_RUNNING: "state_running",
libvirt.VIR_DOMAIN_SHUTDOWN: "state_shutoff",
libvirt.VIR_DOMAIN_SHUTOFF: "state_shutoff",
libvirt.VIR_DOMAIN_NOSTATE: "state_running",
# VIR_DOMAIN_PMSUSPENDED
7: "state_paused",
}
############################################################
# Helpers for shared storage UI between create/addhardware #
############################################################
def set_sparse_tooltip(widget):
sparse_str = _("Fully allocating storage may take longer now, "
"but the OS install phase will be quicker. \n\n"
"Skipping allocation can also cause space issues on "
"the host machine, if the maximum image size exceeds "
"available storage space. \n\n"
"Tip: Storage format qcow2 and qed "
"do not support full allocation.")
widget.set_tooltip_text(sparse_str)
def host_disk_space(conn):
pool = get_default_pool(conn)
path = get_default_dir(conn)
avail = 0
if pool and pool.is_active():
# FIXME: make sure not inactive?
# FIXME: use a conn specific function after we send pool-added
pool.refresh()
avail = int(pool.get_available())
elif not conn.is_remote() and os.path.exists(path):
vfs = os.statvfs(os.path.dirname(path))
avail = vfs[statvfs.F_FRSIZE] * vfs[statvfs.F_BAVAIL]
return float(avail / 1024.0 / 1024.0 / 1024.0)
def update_host_space(conn, widget):
try:
max_storage = host_disk_space(conn)
except:
logging.exception("Error determining host disk space")
return
def pretty_storage(size):
return "%.1f GB" % float(size)
hd_label = ("%s available in the default location" %
pretty_storage(max_storage))
hd_label = ("<span color='#484848'>%s</span>" % hd_label)
widget.set_markup(hd_label)
def check_default_pool_active(err, conn):
default_pool = get_default_pool(conn)
if default_pool and not default_pool.is_active():
res = err.yes_no(_("Default pool is not active."),
_("Storage pool '%s' is not active. "
"Would you like to start the pool "
"now?") % default_pool.get_name())
if not res:
return False
# Try to start the pool
try:
default_pool.start()
logging.info("Started pool '%s'", default_pool.get_name())
except Exception, e:
return err.show_err(_("Could not start storage_pool "
"'%s': %s") %
(default_pool.get_name(), str(e)))
return True
#####################################################
# Hardware model list building (for details, addhw) #
#####################################################
def set_combo_text_column(combo, col):
if combo.get_has_entry():
combo.set_entry_text_column(col)
else:
text = Gtk.CellRendererText()
combo.pack_start(text, True)
combo.add_attribute(text, 'text', col)
def build_video_combo(vm, combo, no_default=None):
model = Gtk.ListStore(str, str)
combo.set_model(model)
set_combo_text_column(combo, 1)
combo.get_model().set_sort_column_id(1, Gtk.SortType.ASCENDING)
populate_video_combo(vm, combo, no_default)
def populate_video_combo(vm, combo, no_default=None):
model = combo.get_model()
has_spice = bool([g for g in vm.get_graphics_devices()
if g.type == g.TYPE_SPICE])
has_qxl = bool([v for v in vm.get_video_devices()
if v.model == "qxl"])
model.clear()
tmpdev = virtinst.VirtualVideoDevice(vm.conn.get_backend())
for m in tmpdev.MODELS:
if vm.stable_defaults():
if m == "qxl" and not has_spice and not has_qxl:
# Only list QXL video option when VM has SPICE video
continue
if m == tmpdev.MODEL_DEFAULT and no_default:
continue
model.append([m, tmpdev.pretty_model(m)])
if len(model) > 0:
combo.set_active(0)
def build_sound_combo(vm, combo, no_default=False):
model = Gtk.ListStore(str)
combo.set_model(model)
set_combo_text_column(combo, 0)
model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
stable_defaults = vm.stable_defaults()
stable_soundmodels = ["ich6", "ac97"]
for m in virtinst.VirtualAudio.MODELS:
if m == virtinst.VirtualAudio.MODEL_DEFAULT and no_default:
continue
if (stable_defaults and m not in stable_soundmodels):
continue
model.append([m])
if len(model) > 0:
combo.set_active(0)
def build_watchdogmodel_combo(vm, combo, no_default=False):
ignore = vm
model = Gtk.ListStore(str)
combo.set_model(model)
set_combo_text_column(combo, 0)
model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
for m in virtinst.VirtualWatchdog.MODELS:
if m == virtinst.VirtualAudio.MODEL_DEFAULT and no_default:
continue
model.append([m])
if len(model) > 0:
combo.set_active(0)
def build_watchdogaction_combo(vm, combo, no_default=False):
ignore = vm
model = Gtk.ListStore(str, str)
combo.set_model(model)
set_combo_text_column(combo, 1)
model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
for m in virtinst.VirtualWatchdog.ACTIONS:
if m == virtinst.VirtualWatchdog.ACTION_DEFAULT and no_default:
continue
model.append([m, virtinst.VirtualWatchdog.get_action_desc(m)])
if len(model) > 0:
combo.set_active(0)
def build_source_mode_combo(vm, combo):
model = Gtk.ListStore(str, str)
combo.set_model(model)
set_combo_text_column(combo, 1)
populate_source_mode_combo(vm, combo)
combo.set_active(0)
def populate_source_mode_combo(vm, combo):
ignore = vm
model = combo.get_model()
model.clear()
# [xml value, label]
model.append([None, "Default"])
model.append(["vepa", "VEPA"])
model.append(["bridge", "Bridge"])
model.append(["private", "Private"])
model.append(["passthrough", "Passthrough"])
def build_smartcard_mode_combo(vm, combo):
model = Gtk.ListStore(str, str)
combo.set_model(model)
set_combo_text_column(combo, 1)
model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
populate_smartcard_mode_combo(vm, combo)
idx = -1
for rowid in range(len(combo.get_model())):
idx = 0
row = combo.get_model()[rowid]
if row[0] == virtinst.VirtualSmartCardDevice.MODE_DEFAULT:
idx = rowid
break
combo.set_active(idx)
def populate_smartcard_mode_combo(vm, combo):
ignore = vm
model = combo.get_model()
model.clear()
# [xml value, label]
model.append(["passthrough", "Passthrough"])
model.append(["host", "Host"])
def build_redir_type_combo(vm, combo):
model = Gtk.ListStore(str, str, bool)
combo.set_model(model)
set_combo_text_column(combo, 1)
populate_redir_type_combo(vm, combo)
combo.set_active(0)
def populate_redir_type_combo(vm, combo):
ignore = vm
model = combo.get_model()
model.clear()
# [xml value, label, conn details]
model.append(["spicevmc", "Spice channel", False])
model.append(["tcp", "TCP", True])
def build_tpm_type_combo(vm, combo):
model = Gtk.ListStore(str, str)
combo.set_model(model)
set_combo_text_column(combo, 1)
model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
populate_tpm_type_combo(vm, combo)
idx = -1
for rowid in range(len(combo.get_model())):
idx = 0
row = combo.get_model()[rowid]
if row[0] == virtinst.VirtualTPMDevice.TYPE_DEFAULT:
idx = rowid
break
combo.set_active(idx)
def populate_tpm_type_combo(vm, combo):
ignore = vm
types = combo.get_model()
types.clear()
# [xml value, label]
for t in virtinst.VirtualTPMDevice.TYPES:
types.append([t, virtinst.VirtualTPMDevice.get_pretty_type(t)])
def build_netmodel_combo(vm, combo):
model = Gtk.ListStore(str, str)
combo.set_model(model)
set_combo_text_column(combo, 1)
model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
populate_netmodel_combo(vm, combo)
combo.set_active(0)
def populate_netmodel_combo(vm, combo):
model = combo.get_model()
model.clear()
# [xml value, label]
model.append([None, _("Hypervisor default")])
if vm.is_hvm():
mod_list = ["rtl8139", "ne2k_pci", "pcnet", "e1000"]
if vm.get_hv_type() in ["kvm", "qemu", "test"]:
mod_list.append("virtio")
if (vm.get_hv_type() == "kvm" and
vm.get_machtype() == "pseries"):
mod_list.append("spapr-vlan")
if vm.get_hv_type() in ["xen", "test"]:
mod_list.append("netfront")
mod_list.sort()
for m in mod_list:
model.append([m, m])
def build_cache_combo(vm, combo):
ignore = vm
model = Gtk.ListStore(str, str)
combo.set_model(model)
set_combo_text_column(combo, 1)
combo.set_active(-1)
for m in virtinst.VirtualDisk.cache_types:
model.append([m, m])
_iter = model.insert(0, [None, "default"])
combo.set_active_iter(_iter)
def build_io_combo(vm, combo, no_default=False):
ignore = vm
model = Gtk.ListStore(str, str)
combo.set_model(model)
set_combo_text_column(combo, 1)
model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
combo.set_active(-1)
for m in virtinst.VirtualDisk.io_modes:
model.append([m, m])
if not no_default:
model.append([None, "default"])
combo.set_active(0)
def build_disk_bus_combo(vm, combo, no_default=False):
ignore = vm
model = Gtk.ListStore(str, str)
combo.set_model(model)
set_combo_text_column(combo, 1)
model.set_sort_column_id(1, Gtk.SortType.ASCENDING)
if not no_default:
model.append([None, "default"])
combo.set_active(-1)
def build_vnc_keymap_combo(vm, combo, no_default=False):
ignore = vm
model = Gtk.ListStore(str, str)
combo.set_model(model)
set_combo_text_column(combo, 1)
if not no_default:
model.append([None, "default"])
else:
model.append([None, "Auto"])
model.append([virtinst.VirtualGraphics.KEYMAP_LOCAL,
"Copy local keymap"])
for k in virtinst.VirtualGraphics.valid_keymaps():
model.append([k, k])
combo.set_active(-1)
#####################################
# Storage format list/combo helpers #
#####################################
def update_storage_format_combo(vm, combo, create):
model = Gtk.ListStore(str)
combo.set_model(model)
set_combo_text_column(combo, 0)
formats = ["raw", "qcow2", "qed"]
no_create_formats = []
if not vm.stable_defaults():
formats.append("vmdk")
no_create_formats.append("vdi")
for m in formats:
model.append([m])
if not create:
for m in no_create_formats:
model.append([m])
if create:
combo.set_active(0)
#######################################################################
# Widgets for listing network device options (in create, addhardware) #
#######################################################################
def pretty_network_desc(nettype, source=None, netobj=None):
if nettype == virtinst.VirtualNetworkInterface.TYPE_USER:
return _("Usermode networking")
extra = None
if nettype == virtinst.VirtualNetworkInterface.TYPE_BRIDGE:
ret = _("Bridge")
elif nettype == virtinst.VirtualNetworkInterface.TYPE_VIRTUAL:
ret = _("Virtual network")
if netobj:
extra = ": %s" % netobj.pretty_forward_mode()
else:
ret = nettype.capitalize()
if source:
ret += " '%s'" % source
if extra:
ret += " %s" % extra
return ret
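# A few illustrative inputs/outputs (values assumed, not from a live connection):
#   pretty_network_desc("user")            -> "Usermode networking"
#   pretty_network_desc("bridge", "br0")   -> "Bridge 'br0'"
#   pretty_network_desc("mcast", "mgroup") -> "Mcast 'mgroup'"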
def init_network_list(net_list, bridge_box, source_mode_combo=None,
vport_expander=None):
# [ network type, source name, label, sensitive?, net is active,
# manual bridge, net instance]
net_model = Gtk.ListStore(str, str, str, bool, bool, bool, object)
net_list.set_model(net_model)
net_list.connect("changed", net_list_changed, bridge_box,
source_mode_combo, vport_expander)
text = Gtk.CellRendererText()
net_list.pack_start(text, True)
net_list.add_attribute(text, 'text', 2)
net_list.add_attribute(text, 'sensitive', 3)
def net_list_changed(net_list, bridge_box,
source_mode_combo, vport_expander):
active = net_list.get_active()
if active < 0:
return
if not bridge_box:
return
row = net_list.get_model()[active]
if source_mode_combo is not None:
doshow = (row[0] == virtinst.VirtualNetworkInterface.TYPE_DIRECT)
set_grid_row_visible(source_mode_combo, doshow)
vport_expander.set_visible(doshow)
show_bridge = row[5]
set_grid_row_visible(bridge_box, show_bridge)
def get_network_selection(net_list, bridge_entry):
idx = net_list.get_active()
if idx == -1:
return None, None
row = net_list.get_model()[net_list.get_active()]
net_type = row[0]
net_src = row[1]
net_check_bridge = row[5]
if net_check_bridge and bridge_entry:
net_type = virtinst.VirtualNetworkInterface.TYPE_BRIDGE
net_src = bridge_entry.get_text()
return net_type, net_src
def populate_network_list(net_list, conn, show_direct_interfaces=True):
model = net_list.get_model()
model.clear()
vnet_bridges = []
vnet_dict = {}
bridge_dict = {}
iface_dict = {}
def build_row(nettype, name, label, is_sensitive, is_running,
manual_bridge=False, key=None):
return [nettype, name, label,
is_sensitive, is_running, manual_bridge,
key]
def set_active(idx):
net_list.set_active(idx)
def add_dict(indict, model):
keylist = indict.keys()
keylist.sort()
rowlist = [indict[k] for k in keylist]
for row in rowlist:
model.append(row)
# For qemu:///session
if conn.is_qemu_session():
nettype = virtinst.VirtualNetworkInterface.TYPE_USER
r = build_row(nettype, None, pretty_network_desc(nettype), True, True)
model.append(r)
set_active(0)
return
hasNet = False
netIdxLabel = None
# Virtual Networks
for uuid in conn.list_net_uuids():
net = conn.get_net(uuid)
nettype = virtinst.VirtualNetworkInterface.TYPE_VIRTUAL
label = pretty_network_desc(nettype, net.get_name(), net)
if not net.is_active():
label += " (%s)" % _("Inactive")
hasNet = True
# FIXME: Should we use 'default' even if it's inactive?
# FIXME: This preference should be configurable
if net.get_name() == "default":
netIdxLabel = label
vnet_dict[label] = build_row(nettype, net.get_name(), label, True,
net.is_active(), key=net.get_uuid())
# Build a list of vnet bridges, so we know not to list them
# in the physical interface list
vnet_bridge = net.get_bridge_device()
if vnet_bridge:
vnet_bridges.append(vnet_bridge)
if not hasNet:
label = _("No virtual networks available")
vnet_dict[label] = build_row(None, None, label, False, False)
vnet_taps = []
for vm in conn.vms.values():
for nic in vm.get_network_devices(refresh_if_nec=False):
if nic.target_dev and nic.target_dev not in vnet_taps:
vnet_taps.append(nic.target_dev)
skip_ifaces = ["lo"]
# Physical devices
hasShared = False
brIdxLabel = None
for name in conn.list_net_device_paths():
br = conn.get_net_device(name)
bridge_name = br.get_bridge()
nettype = virtinst.VirtualNetworkInterface.TYPE_BRIDGE
if ((bridge_name in vnet_bridges) or
(br.get_name() in vnet_bridges) or
(br.get_name() in vnet_taps) or
(br.get_name() in [v + "-nic" for v in vnet_bridges]) or
(br.get_name() in skip_ifaces)):
# Don't list this, as it is basically duplicating virtual net info
continue
if br.is_shared():
sensitive = True
if br.get_bridge():
hasShared = True
brlabel = "(%s)" % pretty_network_desc(nettype, bridge_name)
else:
bridge_name = name
brlabel = _("(Empty bridge)")
else:
if (show_direct_interfaces and
conn.check_support(
conn.SUPPORT_CONN_DIRECT_INTERFACE)):
sensitive = True
nettype = virtinst.VirtualNetworkInterface.TYPE_DIRECT
bridge_name = name
brlabel = ": %s" % _("macvtap")
else:
sensitive = False
brlabel = "(%s)" % _("Not bridged")
label = _("Host device %s %s") % (br.get_name(), brlabel)
if hasShared and not brIdxLabel:
brIdxLabel = label
row = build_row(nettype, bridge_name, label, sensitive, True,
key=br.get_name())
if sensitive:
bridge_dict[label] = row
else:
iface_dict[label] = row
add_dict(bridge_dict, model)
add_dict(vnet_dict, model)
add_dict(iface_dict, model)
# If there is a bridge device, default to that
# If not, use 'default' network
# If not present, use first list entry
# If list empty, use no network devices
return_warn = False
label = brIdxLabel or netIdxLabel
for idx in range(len(model)):
row = model[idx]
is_inactive = not row[4]
if label:
if row[2] == label:
default = idx
return_warn = is_inactive
break
else:
if row[3] is True:
default = idx
return_warn = is_inactive
break
else:
return_warn = True
row = build_row(None, None, _("No networking"), True, False)
model.insert(0, row)
default = 0
# After all is said and done, add a manual bridge option
manual_row = build_row(None, None, _("Specify shared device name"),
True, False, manual_bridge=True)
model.append(manual_row)
set_active(default)
return return_warn
def validate_network(err, conn, nettype, devname, macaddr, model=None):
net = None
if nettype is None:
return None
# Make sure VirtualNetwork is running
netobj = None
if nettype == virtinst.VirtualNetworkInterface.TYPE_VIRTUAL:
for net in conn.nets.values():
if net.get_name() == devname:
netobj = net
break
if netobj and not netobj.is_active():
res = err.yes_no(_("Virtual Network is not active."),
_("Virtual Network '%s' is not active. "
"Would you like to start the network "
"now?") % devname)
if not res:
return False
# Try to start the network
try:
netobj.start()
netobj.tick()
logging.info("Started network '%s'", devname)
except Exception, e:
return err.show_err(_("Could not start virtual network "
"'%s': %s") % (devname, str(e)))
# Create network device
try:
net = virtinst.VirtualNetworkInterface(conn.get_backend())
net.type = nettype
net.source = devname
net.macaddr = macaddr
net.model = model
if net.model == "spapr-vlan":
net.address.set_addrstr("spapr-vio")
except Exception, e:
return err.val_err(_("Error with network parameters."), e)
# Make sure there is no mac address collision
isfatal, errmsg = net.is_conflict_net(conn.get_backend(), net.macaddr)
if isfatal:
return err.val_err(_("Mac address collision."), errmsg)
elif errmsg is not None:
retv = err.yes_no(_("Mac address collision."),
_("%s Are you sure you want to use this "
"address?") % errmsg)
if not retv:
return False
return net
############################################
# Populate media widget (choosecd, create) #
############################################
def init_mediadev_combo(widget):
# [Device path, pretty label, has_media?, device key, media key,
# vmmMediaDevice, is valid device]
model = Gtk.ListStore(str, str, bool, str, str, bool)
widget.set_model(model)
model.clear()
text = Gtk.CellRendererText()
widget.pack_start(text, True)
widget.add_attribute(text, 'text', OPTICAL_LABEL)
widget.add_attribute(text, 'sensitive', OPTICAL_IS_VALID)
def populate_mediadev_combo(conn, widget, devtype):
sigs = []
model = widget.get_model()
model.clear()
set_mediadev_default(model)
sigs.append(conn.connect("mediadev-added", mediadev_added, widget, devtype))
sigs.append(conn.connect("mediadev-removed", mediadev_removed, widget))
widget.set_active(-1)
mediadev_set_default_selection(widget)
return sigs
def set_mediadev_default(model):
if len(model) == 0:
model.append([None, _("No device present"), False, None, None, False])
def set_row_from_object(row, obj):
row[OPTICAL_DEV_PATH] = obj.get_path()
row[OPTICAL_LABEL] = obj.pretty_label()
row[OPTICAL_IS_MEDIA_PRESENT] = obj.has_media()
row[OPTICAL_DEV_KEY] = obj.get_key()
row[OPTICAL_MEDIA_KEY] = obj.get_media_key()
row[OPTICAL_IS_VALID] = True
def mediadev_removed(ignore_helper, key, widget):
model = widget.get_model()
active = widget.get_active()
idx = 0
for row in model:
if row[OPTICAL_DEV_KEY] == key:
# Whole device removed
del(model[idx])
if idx > active and active != -1:
widget.set_active(active - 1)
elif idx == active:
widget.set_active(-1)
idx += 1
set_mediadev_default(model)
mediadev_set_default_selection(widget)
def mediadev_added(ignore_helper, newobj, widget, devtype):
model = widget.get_model()
if newobj.get_media_type() != devtype:
return
if model is None:
return
if len(model) == 1 and model[0][OPTICAL_IS_VALID] is False:
# Only entry is the 'No device' entry
model.clear()
newobj.connect("media-added", mediadev_media_changed, widget)
newobj.connect("media-removed", mediadev_media_changed, widget)
# Brand new device
row = [None, None, None, None, None, None]
set_row_from_object(row, newobj)
model.append(row)
mediadev_set_default_selection(widget)
def mediadev_media_changed(newobj, widget):
model = widget.get_model()
active = widget.get_active()
idx = 0
# Search for the row with matching device node and
# fill in info about inserted media. If model has no current
# selection, select the new media.
for row in model:
if row[OPTICAL_DEV_PATH] == newobj.get_path():
set_row_from_object(row, newobj)
has_media = row[OPTICAL_IS_MEDIA_PRESENT]
if has_media and active == -1:
widget.set_active(idx)
elif not has_media and active == idx:
widget.set_active(-1)
idx = idx + 1
mediadev_set_default_selection(widget)
def mediadev_set_default_selection(widget):
# Set the first active cdrom device as selected, otherwise none
model = widget.get_model()
idx = 0
active = widget.get_active()
if active != -1:
# already a selection, don't change it
return
for row in model:
if row[OPTICAL_IS_MEDIA_PRESENT] is True:
widget.set_active(idx)
return
idx += 1
widget.set_active(-1)
####################################################################
# Build toolbar shutdown button menu (manager and details toolbar) #
####################################################################
class _VMMenu(Gtk.Menu):
# pylint: disable=E1101
    # pylint can't detect functions we inherit from Gtk, e.g. self.add
def __init__(self, src, current_vm_cb, show_open=True):
Gtk.Menu.__init__(self)
self._parent = src
self._current_vm_cb = current_vm_cb
self._show_open = show_open
self._init_state()
def _add_action(self, label, signal,
iconname="system-shutdown", addcb=True):
if label.startswith("gtk-"):
item = Gtk.ImageMenuItem.new_from_stock(label, None)
else:
item = Gtk.ImageMenuItem.new_with_mnemonic(label)
if iconname:
if iconname.startswith("gtk-"):
icon = Gtk.Image.new_from_stock(iconname, Gtk.IconSize.MENU)
else:
icon = Gtk.Image.new_from_icon_name(iconname,
Gtk.IconSize.MENU)
item.set_image(icon)
item.vmm_widget_name = signal
if addcb:
item.connect("activate", self._action_cb)
self.add(item)
return item
def _action_cb(self, src):
vm = self._current_vm_cb()
if not vm:
return
self._parent.emit("action-%s-domain" % src.vmm_widget_name,
vm.conn.get_uri(), vm.get_uuid())
def _init_state(self):
raise NotImplementedError()
def update_widget_states(self, vm):
raise NotImplementedError()
class VMShutdownMenu(_VMMenu):
# pylint: disable=E1101
    # pylint can't detect functions we inherit from Gtk, e.g. self.add
def _init_state(self):
self._add_action(_("_Reboot"), "reboot")
self._add_action(_("_Shut Down"), "shutdown")
self._add_action(_("F_orce Reset"), "reset")
self._add_action(_("_Force Off"), "destroy")
self.add(Gtk.SeparatorMenuItem())
self._add_action(_("Sa_ve"), "save", iconname=Gtk.STOCK_SAVE)
self.show_all()
def update_widget_states(self, vm):
statemap = {
"reboot": bool(vm and vm.is_stoppable()),
"shutdown": bool(vm and vm.is_stoppable()),
"reset": bool(vm and vm.is_stoppable()),
"save": bool(vm and vm.is_destroyable()),
"destroy": bool(vm and vm.is_destroyable()),
}
for child in self.get_children():
name = getattr(child, "vmm_widget_name", None)
if name in statemap:
child.set_sensitive(statemap[name])
class VMActionMenu(_VMMenu):
# pylint: disable=E1101
    # pylint can't detect functions we inherit from Gtk, e.g. self.add
def _init_state(self):
self._add_action(_("_Run"), "run", Gtk.STOCK_MEDIA_PLAY)
self._add_action(_("_Pause"), "suspend", Gtk.STOCK_MEDIA_PAUSE)
self._add_action(_("R_esume"), "resume", Gtk.STOCK_MEDIA_PAUSE)
s = self._add_action(_("_Shut Down"), "shutdown", addcb=False)
s.set_submenu(VMShutdownMenu(self._parent, self._current_vm_cb))
self.add(Gtk.SeparatorMenuItem())
self._add_action(_("Clone..."), "clone", None)
self._add_action(_("Migrate..."), "migrate", None)
self._add_action(_("_Delete"), "delete", Gtk.STOCK_DELETE)
if self._show_open:
self.add(Gtk.SeparatorMenuItem())
self._add_action(Gtk.STOCK_OPEN, "show", None)
self.show_all()
def update_widget_states(self, vm):
statemap = {
"run": bool(vm and vm.is_runable()),
"shutdown": bool(vm and vm.is_stoppable()),
"suspend": bool(vm and vm.is_stoppable()),
"resume": bool(vm and vm.is_paused()),
"migrate": bool(vm and vm.is_stoppable()),
"clone": bool(vm and not vm.is_read_only()),
}
vismap = {
"suspend": bool(vm and not vm.is_paused()),
"resume": bool(vm and vm.is_paused()),
}
for child in self.get_children():
name = getattr(child, "vmm_widget_name", None)
if hasattr(child, "update_widget_states"):
child.update_widget_states(vm)
if name in statemap:
child.set_sensitive(statemap[name])
if name in vismap:
child.set_visible(vismap[name])
def change_run_text(self, text):
for child in self.get_children():
if getattr(child, "vmm_widget_name", None) == "run":
child.get_child().set_label(text)
#####################################
# Path permissions checker for qemu #
#####################################
def check_path_search_for_qemu(err, conn, path):
if conn.is_remote() or not conn.is_qemu_system():
return
user = config.running_config.default_qemu_user
for i in conn.caps.host.secmodels:
if i.model == "dac":
label = i.baselabels.get("kvm") or i.baselabels.get("qemu")
if not label:
continue
pwuid = pwd.getpwuid(int(label.split(":")[0].replace("+", "")))
if pwuid:
user = pwuid[0]
skip_paths = config.running_config.get_perms_fix_ignore()
broken_paths = virtinst.VirtualDisk.check_path_search_for_user(
conn.get_backend(),
path, user)
for p in broken_paths:
if p in skip_paths:
broken_paths.remove(p)
if not broken_paths:
return
logging.debug("No search access for dirs: %s", broken_paths)
resp, chkres = err.warn_chkbox(
_("The emulator may not have search permissions "
"for the path '%s'.") % path,
_("Do you want to correct this now?"),
_("Don't ask about these directories again."),
buttons=Gtk.ButtonsType.YES_NO)
if chkres:
config.running_config.add_perms_fix_ignore(broken_paths)
if not resp:
return
logging.debug("Attempting to correct permission issues.")
errors = virtinst.VirtualDisk.fix_path_search_for_user(conn.get_backend(),
path, user)
if not errors:
return
errmsg = _("Errors were encountered changing permissions for the "
"following directories:")
details = ""
for path, error in errors.items():
if path not in broken_paths:
continue
details += "%s : %s\n" % (path, error)
logging.debug("Permission errors:\n%s", details)
ignore, chkres = err.err_chkbox(errmsg, details,
_("Don't ask about these directories again."))
if chkres:
config.running_config.add_perms_fix_ignore(errors.keys())
######################################
# Interface startmode widget builder #
######################################
def build_startmode_combo(combo):
model = Gtk.ListStore(str)
combo.set_model(model)
set_combo_text_column(combo, 0)
model.append(["none"])
model.append(["onboot"])
model.append(["hotplug"])
#########################
# Console keycombo menu #
#########################
def build_keycombo_menu(cb):
menu = Gtk.Menu()
def make_item(name, combo):
item = Gtk.MenuItem.new_with_mnemonic(name)
item.connect("activate", cb, combo)
menu.add(item)
make_item("Ctrl+Alt+_Backspace", ["Control_L", "Alt_L", "BackSpace"])
make_item("Ctrl+Alt+_Delete", ["Control_L", "Alt_L", "Delete"])
menu.add(Gtk.SeparatorMenuItem())
for i in range(1, 13):
make_item("Ctrl+Alt+F_%d" % i, ["Control_L", "Alt_L", "F%d" % i])
menu.add(Gtk.SeparatorMenuItem())
make_item("_Printscreen", ["Print"])
menu.show_all()
return menu
#############
# Misc bits #
#############
def spin_get_helper(widget):
adj = widget.get_adjustment()
txt = widget.get_text()
try:
ret = int(txt)
except:
ret = adj.get_value()
return ret
def get_ideal_path_info(conn, name):
path = get_default_dir(conn)
suffix = ".img"
return (path, name, suffix)
def get_ideal_path(conn, name):
target, name, suffix = get_ideal_path_info(conn, name)
return os.path.join(target, name) + suffix
def get_default_pool(conn):
pool = None
for uuid in conn.list_pool_uuids():
p = conn.get_pool(uuid)
if p.get_name() == "default":
pool = p
return pool
def get_default_dir(conn):
pool = get_default_pool(conn)
if pool:
return pool.get_target_path()
else:
return config.running_config.get_default_image_dir(conn)
def get_default_path(conn, name, collidelist=None):
collidelist = collidelist or []
pool = get_default_pool(conn)
default_dir = get_default_dir(conn)
def path_exists(p):
return os.path.exists(p) or p in collidelist
if not pool:
# Use old generating method
origf = os.path.join(default_dir, name + ".img")
f = origf
n = 1
while path_exists(f) and n < 100:
f = os.path.join(default_dir, name +
"-" + str(n) + ".img")
n += 1
if path_exists(f):
f = origf
path = f
else:
target, ignore, suffix = get_ideal_path_info(conn, name)
# Sanitize collidelist to work with the collision checker
newcollidelist = []
for c in collidelist:
if c and os.path.dirname(c) == pool.get_target_path():
newcollidelist.append(os.path.basename(c))
path = virtinst.StorageVolume.find_free_name(
pool.get_backend(), name,
suffix=suffix, collidelist=newcollidelist)
path = os.path.join(target, path)
return path
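# Illustrative fallback without a default pool (paths assumed): if
# "<default_dir>/guest.img" already exists (or collides), the loop above
# tries "guest-1.img", "guest-2.img", ... and falls back to the original
# name after 99 attempts.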
def browse_local(parent, dialog_name, conn, start_folder=None,
_type=None, dialog_type=None,
confirm_func=None, browse_reason=None,
choose_button=None, default_name=None):
"""
Helper function for launching a filechooser
@param parent: Parent window for the filechooser
@param dialog_name: String to use in the title bar of the filechooser.
@param conn: vmmConnection used by calling class
@param start_folder: Folder the filechooser is viewing at startup
@param _type: File extension to filter by (e.g. "iso", "png")
@param dialog_type: Maps to FileChooserDialog 'action'
@param confirm_func: Optional callback function if file is chosen.
@param browse_reason: The vmmConfig.CONFIG_DIR* reason we are browsing.
If set, this will override the 'folder' parameter with the gconf
value, and store the user chosen path.
"""
# Initial setup
overwrite_confirm = False
if dialog_type is None:
dialog_type = Gtk.FileChooserAction.OPEN
if dialog_type == Gtk.FileChooserAction.SAVE:
if choose_button is None:
choose_button = Gtk.STOCK_SAVE
overwrite_confirm = True
if choose_button is None:
choose_button = Gtk.STOCK_OPEN
fcdialog = Gtk.FileChooserDialog(title=dialog_name,
parent=parent,
action=dialog_type,
buttons=(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
choose_button,
Gtk.ResponseType.ACCEPT))
fcdialog.set_default_response(Gtk.ResponseType.ACCEPT)
if default_name:
fcdialog.set_current_name(default_name)
# If confirm is set, warn about a file overwrite
if confirm_func:
overwrite_confirm = True
fcdialog.connect("confirm-overwrite", confirm_func)
fcdialog.set_do_overwrite_confirmation(overwrite_confirm)
# Set file match pattern (ex. *.png)
if _type is not None:
pattern = _type
name = None
if type(_type) is tuple:
pattern = _type[0]
name = _type[1]
f = Gtk.FileFilter()
f.add_pattern("*." + pattern)
if name:
f.set_name(name)
fcdialog.set_filter(f)
# Set initial dialog folder
if browse_reason:
start_folder = config.running_config.get_default_directory(conn,
browse_reason)
if start_folder is not None:
if os.access(start_folder, os.R_OK):
fcdialog.set_current_folder(start_folder)
# Run the dialog and parse the response
ret = None
if fcdialog.run() == Gtk.ResponseType.ACCEPT:
ret = fcdialog.get_filename()
fcdialog.destroy()
# Store the chosen directory in gconf if necessary
if ret and browse_reason and not ret.startswith("/dev"):
config.running_config.set_default_directory(os.path.dirname(ret),
browse_reason)
return ret
def pretty_hv(gtype, domtype):
"""
Convert XML <domain type='foo'> and <os><type>bar</type>
into a more human relevant string.
"""
gtype = gtype.lower()
domtype = domtype.lower()
label = domtype
if domtype == "kvm":
if gtype == "xen":
label = "xenner"
elif domtype == "xen":
if gtype == "xen":
label = "xen (paravirt)"
elif gtype == "hvm":
label = "xen (fullvirt)"
elif domtype == "test":
if gtype == "xen":
label = "test (xen)"
elif gtype == "hvm":
label = "test (hvm)"
return label
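# Illustrative mappings (per the branches above):
#   pretty_hv("hvm", "xen")  -> "xen (fullvirt)"
#   pretty_hv("xen", "xen")  -> "xen (paravirt)"
#   pretty_hv("xen", "kvm")  -> "xenner"
#   pretty_hv("hvm", "kvm")  -> "kvm"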
def iface_in_use_by(conn, name):
use_str = ""
for i in conn.list_interface_names():
iface = conn.get_interface(i)
if name in iface.get_slave_names():
if use_str:
use_str += ", "
use_str += iface.get_name()
return use_str
def chkbox_helper(src, getcb, setcb, text1, text2=None,
alwaysrecord=False,
default=True,
chktext=_("Don't ask me again")):
"""
Helper to prompt user about proceeding with an operation
Returns True if the 'yes' or 'ok' button was selected, False otherwise
@alwaysrecord: Don't require user to select 'yes' to record chkbox value
@default: What value to return if getcb tells us not to prompt
"""
do_prompt = getcb()
if not do_prompt:
return default
res = src.err.warn_chkbox(text1=text1, text2=text2,
chktext=chktext,
buttons=Gtk.ButtonsType.YES_NO)
response, skip_prompt = res
if alwaysrecord or response:
setcb(not skip_prompt)
return response
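# Sketch of a typical call site (the config callback names are hypothetical):
#
#   ok = chkbox_helper(self,
#                      self.config.get_confirm_poweroff,
#                      self.config.set_confirm_poweroff,
#                      text1=_("Are you sure you want to power off the VM?"))
#   if not ok:
#       return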
def get_list_selection(widget):
selection = widget.get_selection()
active = selection.get_selected()
treestore, treeiter = active
if treeiter is not None:
return treestore[treeiter]
return None
def set_list_selection(widget, rownum):
path = str(rownum)
selection = widget.get_selection()
selection.unselect_all()
widget.set_cursor(path)
selection.select_path(path)
def set_row_selection(listwidget, prevkey):
model = listwidget.get_model()
_iter = None
if prevkey:
for row in model:
if row[0] == prevkey:
_iter = row.iter
break
if not _iter:
_iter = model.get_iter_first()
if hasattr(listwidget, "get_selection"):
selection = listwidget.get_selection()
cb = selection.select_iter
else:
selection = listwidget
cb = selection.set_active_iter
if _iter:
cb(_iter)
selection.emit("changed")
def child_get_property(parent, child, propname):
# Wrapper for child_get_property, which pygobject doesn't properly
# introspect
value = GObject.Value()
value.init(GObject.TYPE_INT)
parent.child_get_property(child, propname, value)
return value.get_int()
def set_grid_row_visible(child, visible):
# For the passed widget, find its parent GtkGrid, and hide/show all
# elements that are in the same row as it. Simplifies having to name
# every element in a row when we want to dynamically hide things
    # based on UI interaction
parent = child.get_parent()
    if type(parent) is not Gtk.Grid:
raise RuntimeError("Programming error, parent must be grid, "
"not %s" % type(parent))
row = child_get_property(parent, child, "top-attach")
for child in parent.get_children():
if child_get_property(parent, child, "top-attach") == row:
child.set_visible(visible)
def default_uri(always_system=False):
if os.path.exists('/var/lib/xend'):
if (os.path.exists('/dev/xen/evtchn') or
os.path.exists("/proc/xen")):
return 'xen:///'
if (os.path.exists("/usr/bin/qemu") or
os.path.exists("/usr/bin/qemu-kvm") or
os.path.exists("/usr/bin/kvm") or
os.path.exists("/usr/libexec/qemu-kvm")):
if always_system or os.geteuid() == 0:
return "qemu:///system"
else:
return "qemu:///session"
return None
def exception_is_libvirt_error(e, error):
return (hasattr(libvirt, error) and
e.get_error_code() == getattr(libvirt, error))
def log_redefine_xml_diff(obj, origxml, newxml):
objname = "<%s name=%s>" % (obj.__class__.__name__, obj.get_name())
if origxml == newxml:
logging.debug("Redefine requested for %s, but XML didn't change!",
objname)
return
import difflib
diff = "".join(difflib.unified_diff(origxml.splitlines(1),
newxml.splitlines(1),
fromfile="Original XML",
tofile="New XML"))
logging.debug("Redefining %s with XML diff:\n%s", objname, diff)
| gpl-2.0 | 6,635,527,054,526,621,000 | 29.472702 | 80 | 0.576995 | false |
edubecks/vaidecaronaorg | caronasbrasilapp/djangoapp/apps/caronasbrasil/robots/crawler.py | 1 | 3567 | # coding: utf-8
from pprint import pprint
import unidecode
from djangoapp.apps.caronasbrasil.model.caronasbrasil.carona_post import CaronaPost
from djangoapp.apps.caronasbrasil.model.fb_groups.fb_groups_controller import FBGroupsController
from djangoapp.apps.caronasbrasil.persistence_controller import PersistenceController
__author__ = 'edubecks'
class Crawler(object):
## default time_interval 1 week = 60min * 24h *7d
def __init__(self, time_interval=10080):
self.time_interval = time_interval
return
    def log_not_parsed_post(self, carona_post):
PersistenceController().add_parser_error(carona_post.fb_group_id,
carona_post.fb_post_id, carona_post.content_clean)
return
def post_is_commented(self, message):
message_decoded = unidecode.unidecode(message)
        return message_decoded[:2] == '//'
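    # Illustrative behavior (sample strings, not real posts):
    #   post_is_commented('// ja consegui carona') -> True
    #   post_is_commented('procuro carona sexta')  -> False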
def retrieve_posts(self, fb_group_id):
## persistence
persistence = PersistenceController()
city1, city1_state, city1_list, city2, city2_state, city2_list = \
persistence.get_cities_by_fb_group_id(fb_group_id)
## getting feed
fb_manager = FBGroupsController(fb_group_id)
feed = fb_manager.get_posts(last_time_checked=self.time_interval)
for fb_post in feed:
## check if the post is not commented
if (not self.post_is_commented(fb_post['message'])
## check if it is already parsed
and not persistence.exists_post(fb_post['id'])):
# pprint(fb_post)
## create new carona post
carona_post = CaronaPost(fb_post)
pprint(carona_post.content_clean)
            ## setting origin and destination
carona_post.city1 = city1
carona_post.city1_state = city1_state
carona_post.city2 = city2
carona_post.city2_state = city2_state
carona_post.city1_list = city1_list
carona_post.city2_list = city2_list
## date / time
has_date_tag = carona_post.retrieve_date_tags()
carona_post.retrieve_time_tags()
# has_time_interval = carona_post.retrieve_time_interval()
has_time_tag = True if carona_post.tag_time else False
                ## origin / destination
has_origin_destiny = carona_post.retrieve_origin_destiny()
                ## offering / seeking a ride (oferecer/procurar)
has_ofereco_procuro = carona_post.retrieve_ofereco_procuro_tag()
                ## [OPTIONAL] number of seats (numero de vagas)
has_vagas = carona_post.retrieve_vagas()
## check the tag requirements
# print(has_date_tag, has_time_tag, has_origin_destiny, has_ofereco_procuro)
if has_date_tag and has_time_tag and has_origin_destiny and has_ofereco_procuro:
## saving in the db
# pprint(str(carona_post))
# pprint('---------------------')
persistence.add_carona(carona_post)
else:
print('*************** wrong')
pprint(carona_post.content_clean)
pprint(str(carona_post))
print('*******************************************')
self.log_not_parsed_post(carona_post)
else:
## TODO: call logger
pass
return
| mit | -4,877,079,740,951,469,000 | 37.771739 | 99 | 0.552285 | false |
therewillbecode/ichnaea | ichnaea/api/views.py | 1 | 5002 | """
Implementation of a API specific HTTP service view.
"""
import colander
import simplejson as json
import six
from ichnaea.api.exceptions import (
DailyLimitExceeded,
InvalidAPIKey,
ParseError,
)
from ichnaea.api.rate_limit import rate_limit_exceeded
from ichnaea.models.api import ApiKey
from ichnaea import util
from ichnaea.webapp.view import BaseView
if six.PY2: # pragma: no cover
from ipaddr import IPAddress as ip_address # NOQA
else: # pragma: no cover
from ipaddress import ip_address
class BaseAPIView(BaseView):
"""Common base class for all API related views."""
check_api_key = True #: Should API keys be checked?
error_on_invalidkey = True #: Deny access for invalid API keys?
metric_path = None #: Dotted URL path, for example v1.submit.
schema = None #: An instance of a colander schema to validate the data.
view_type = None #: The type of view, for example submit or locate.
def __init__(self, request):
super(BaseAPIView, self).__init__(request)
self.raven_client = request.registry.raven_client
self.redis_client = request.registry.redis_client
self.stats_client = request.registry.stats_client
def log_unique_ip(self, apikey_shortname):
try:
ip = str(ip_address(self.request.client_addr))
except ValueError: # pragma: no cover
ip = None
if ip:
redis_key = 'apiuser:{api_type}:{api_name}:{date}'.format(
api_type=self.view_type,
api_name=apikey_shortname,
date=util.utcnow().date().strftime('%Y-%m-%d'),
)
with self.redis_client.pipeline() as pipe:
pipe.pfadd(redis_key, ip)
pipe.expire(redis_key, 691200) # 8 days
pipe.execute()
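    # The Redis key written above is a per-day HyperLogLog, e.g. (values
    # illustrative): 'apiuser:locate:test:2016-04-01'. Running PFCOUNT on it
    # yields an approximate count of distinct client IPs for that API key
    # and day; the key expires after 8 days.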
def log_count(self, apikey_shortname, apikey_log):
self.stats_client.incr(
self.view_type + '.request',
tags=['path:' + self.metric_path,
'key:' + apikey_shortname])
if self.request.client_addr and apikey_log:
try:
self.log_unique_ip(apikey_shortname)
except Exception: # pragma: no cover
self.raven_client.captureException()
def check(self):
api_key = None
api_key_text = self.request.GET.get('key', None)
if api_key_text is None:
self.log_count('none', False)
if self.error_on_invalidkey:
raise InvalidAPIKey()
if api_key_text is not None:
try:
session = self.request.db_ro_session
api_key = session.query(ApiKey).get(api_key_text)
except Exception: # pragma: no cover
# if we cannot connect to backend DB, skip api key check
self.raven_client.captureException()
if api_key is not None:
self.log_count(api_key.name, api_key.log)
rate_key = 'apilimit:{key}:{time}'.format(
key=api_key_text,
time=util.utcnow().strftime('%Y%m%d')
)
should_limit = rate_limit_exceeded(
self.redis_client,
rate_key,
maxreq=api_key.maxreq
)
if should_limit:
raise DailyLimitExceeded()
else:
if api_key_text is not None:
self.log_count('invalid', False)
if self.error_on_invalidkey:
raise InvalidAPIKey()
# If we failed to look up an ApiKey, create an empty one
# rather than passing None through
api_key = api_key or ApiKey(valid_key=None)
return self.view(api_key)
def preprocess_request(self):
errors = []
request_content = self.request.body
if self.request.headers.get('Content-Encoding') == 'gzip':
# handle gzip self.request bodies
try:
request_content = util.decode_gzip(self.request.body)
except OSError as exc:
errors.append({'name': None, 'description': repr(exc)})
request_data = {}
try:
request_data = json.loads(
request_content, encoding=self.request.charset)
except ValueError as exc:
errors.append({'name': None, 'description': repr(exc)})
validated_data = {}
try:
validated_data = self.schema.deserialize(request_data)
except colander.Invalid as exc:
errors.append({'name': None, 'description': exc.asdict()})
if request_content and errors:
raise ParseError()
return (validated_data, errors)
def __call__(self):
"""Execute the view and return a response."""
if self.check_api_key:
return self.check()
else:
api_key = ApiKey(valid_key=None, allow_fallback=False, log=False)
return self.view(api_key)
| apache-2.0 | -380,044,028,456,352,960 | 33.027211 | 77 | 0.57597 | false |
jrheling/pid_controller | pid_controller/tests/PeakCounterTest.py | 1 | 2050 | #!/usr/bin/python
import PeakCounter
import unittest
import random
import time
class PeakCounterTest(unittest.TestCase):
def setUp(self):
self.PC = PeakCounter.PeakCounter()
def test_construct(self):
self.assertIsInstance(self.PC, PeakCounter.PeakCounter)
def test_initnumpeaks(self):
self.assertEquals(self.PC.num_peaks,0)
def test_fivepeaks(self):
self.PC = PeakCounter.PeakCounter(5)
self.assertEquals(self.PC._max_peaks,5)
def test_add_value(self):
"""docstring for test_add_value"""
self.PC.add_value(random.randint(0,100))
def test_set_lookback_sizeNaN(self):
with self.assertRaises(ValueError):
self.PC.lookback_size = "foo"
	def test_set_lookback_sizeTooSmall(self):
with self.assertRaises(ValueError):
self.PC.lookback_size = 1
def test_set_lookback_size(self):
i = random.randint(2,85)
self.PC.lookback_size = i
self.assertEquals(self.PC.lookback_size,i)
	def test_justInflected(self):
# FIXME: implement
pass
def test_get_num_peaks(self):
# FIXME: implement
pass
def test_sequence1(self):
seq = [ 5, 1, 2, 4, 12, 8, 3, 6, 1.5, 4, 5.3, 8.7, 8.6, 0.7]
# peaks should be 5, 12, 8.7
for i in seq:
self.PC.add_value(i)
time.sleep(0.1)
self.assertEquals(self.PC.num_peaks,3)
self.assertEquals(self.PC.get_last_peaks(2),[12, 8.7])
self.assertEquals(self.PC.get_last_peaks(4),[5, 12, 8.7])
self.assertEquals(self.PC.get_last_peaks(3),[5, 12, 8.7])
self.assertEquals(self.PC.get_last_peaks(1),[8.7])
## last_peak_delta includes processing time, so we can't predict it precisely
self.assertTrue((self.PC.last_peak_delta - 0.7) < 0.005)
pass
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(PeakCounterTest)
unittest.TextTestRunner(verbosity=2).run(suite) | apache-2.0 | 9,204,944,022,240,669,000 | 29.61194 | 85 | 0.611707 | false |
NorthernLightsDataLab/pyCFL | pyCFL/core.py | 1 | 1309 |
import urllib.request, json, re
import pyCFL.config as cfg
class cflAPI(object):
def __init__(self):
self.base_url = 'http://api.cfl.ca/v1'
self._set_api_key()
def _get_games_data(self, season, game_id=None):
if game_id:
api_url = self.base_url + '/games/' + str(season) + '/game/' + str(game_id)
else:
api_url = self.base_url + '/games/' + str(season)
with urllib.request.urlopen(self._build_url(api_url)) as url:
data = json.loads(url.read().decode())
return(data)
def _get_play_by_play(self, season, game_id):
api_url = self.base_url + '/games/' + str(season) + '/game/' + str(game_id) + '?include=play_by_play'
with urllib.request.urlopen(self._build_url(api_url)) as url:
data = json.loads(url.read().decode())
return(data)
def _set_api_key(self):
self.api_key = cfg.Settings().api_key
print('api key is: {}'.format(self.api_key))
def _build_url(self, url):
try:
            if re.search(r'\?', url):
url = url + '&key=' + self.api_key
else:
url = url + '?key=' + self.api_key
except:
print("API must be set first using _set_api_key('YOUR_API_KEY')")
return(url)
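    # Illustrative expansion (the API key value is hypothetical):
    #   _build_url('http://api.cfl.ca/v1/games/2016')
    #       -> 'http://api.cfl.ca/v1/games/2016?key=MY_KEY'
    #   _build_url('http://api.cfl.ca/v1/games/2016?include=boxscore')
    #       -> 'http://api.cfl.ca/v1/games/2016?include=boxscore&key=MY_KEY'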
| mit | 3,525,122,950,929,481,700 | 34.378378 | 109 | 0.537051 | false |
vroomfondle/podi | lib/podi/util/util.py | 1 | 4084 | """
Podi, a command-line interface for Kodi.
Copyright (C) 2015 Peter Frost <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from ..rpc.library import list_episodes, list_tv_shows, list_movies
SORT_ASC = 1
SORT_DESC = 2
def retrieve_sorted_episodes(rpc, tv_show_id, sort_field='episodeid'):
"""
Sends a JSON RPC call to retrieve a list of episodes for the given show.
:param rpc A callable which will send the JSONRPC request to the Kodi server
:param tv_show_id The id of the target show
"""
episodes = rpc(list_episodes(tv_show_id)).get(
'episodes', [])
for episode in sorted(
episodes,
key=lambda episode: episode[sort_field]):
yield episode
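# Illustrative call (the rpc callable and show id are assumptions; rpc is
# expected to POST the JSONRPC payload to Kodi and return the decoded
# 'result' dict):
#
#   for episode in retrieve_sorted_episodes(rpc, 42):
#       print(episode['episodeid'])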
def retrieve_sorted_movies(rpc, sort_field='movieid', filters=None):
"""
Sends a JSON RPC call to retrieve a list of movies.
:param rpc A callable which will send the JSONRPC request to the Kodi server
"""
movies = rpc(list_movies(filters=filters)).get('movies', [])
for movie in sorted(
movies,
key=lambda movie: movie[sort_field]):
yield movie
def retrieve_sorted_shows(rpc, tv_show_id=None, sort_field='tvshowid'):
"""
Sends a JSON RPC call to retrieve a list of TV shows.
:param rpc A callable which will send the JSONRPC request to the Kodi server.
:param tv_show_id If set, restrict the list to a single id.
"""
shows = rpc(list_tv_shows()).get('tvshows', [])
for show in sorted(shows, key=lambda show: show[sort_field]):
if (tv_show_id is None) or int(show['tvshowid']) == int(tv_show_id):
yield show
def list_to_dicts(key, input_list):
"""
Turns a list of values into a list of single-entry dicts, with the provided key,
so that the dicts can be used with pystache. The list is modified in-place.
"""
for index in range(len(input_list)):
input_list[index] = {key: input_list[index]}
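# Illustrative effect (the list is modified in place):
#   rows = ['a', 'b']
#   list_to_dicts('name', rows)
#   rows == [{'name': 'a'}, {'name': 'b'}]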
def align_fields_for_display(items, fields):
"""
Pads/truncates fields in each item to the specified length and puts the result in index ('display'+field_name).
:param fields A list of tuples (str,int): (field_name, length).
:returns the input list with padded items.
"""
for item in items:
for (field_name, length) in fields:
            if isinstance(item[field_name], str):
                field_value = item[field_name]
            else:
                field_value = str(item[field_name])
item['display{0}'.format(field_name)] = field_value[
0:length - 1].ljust(length)
return items
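# Illustrative effect (the field is truncated to length-1 chars and
# left-justified to the requested width):
#   items = [{'title': 'Alien'}]
#   align_fields_for_display(items, [('title', 10)])
#   items[0]['displaytitle'] == 'Alien     '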
def format_runtime(video_item):
"""
    Formats the runtime of a given video item, and returns a dict:
    {'total_seconds':n, 'hours':n, 'minutes':n, 'seconds':n, 'str':"{hours}:{minutes}:{seconds}"}.
    If the 'runtime' key is missing, expect to see a KeyError.
    :param video_item An item as returned in response to the JSON defined by the lib.podi.rpc.library methods.
        Should include an integer 'runtime' value in seconds.
"""
minutes, seconds = divmod(int(video_item['runtime']), 60)
hours, minutes = divmod(minutes, 60)
return {
'total_seconds': video_item['runtime'],
'hours': hours,
'minutes': minutes,
'seconds': seconds,
'str': "{0:02d}:{1:02d}:{2:02d}".format(hours, minutes, seconds),
}
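# Illustrative result for a 3725-second runtime:
#   format_runtime({'runtime': 3725})
#     -> {'total_seconds': 3725, 'hours': 1, 'minutes': 2,
#         'seconds': 5, 'str': '01:02:05'}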
| gpl-3.0 | -5,527,879,337,605,631,000 | 36.46789 | 115 | 0.652057 | false |
h-mayorquin/camp_india_2016 | tutorials/chemical switches/moose/neuroml/GranuleCell/Granule98.py | 1 | 2209 | ## Aditya Gilra, NCBS, Bangalore, 2012
"""
Inside the .../moose-examples/GranuleCell/ directory supplied with MOOSE, run
python testNeuroML_Gran98.py
(other channels and morph xml files are already present in this same directory).
The soma name below is hard coded for gran98, else any other file can be used by modifying this script.
"""
#import os
#os.environ['NUMPTHREADS'] = '1'
#import sys
#sys.path.append('../../../python')
import moose
from moose.utils import *
from moose.neuroml.NeuroML import NeuroML
from pylab import *
simdt = 1e-6 # s
plotdt = 10e-6 # s
runtime = 0.7 # s
def loadGran98NeuroML_L123(filename):
neuromlR = NeuroML()
populationDict, projectionDict = \
neuromlR.readNeuroMLFromFile(filename)
# readNeuroMLFromFile returns populationDict = { 'populationname1':(cellname,{int(instanceid1):moosecell, ... }) , ... }
# and projectionDict = { 'projectionname1':(source,target,[(syn_name1,pre_seg_path,post_seg_path),...]) , ... }
soma_path = populationDict['Gran'][1][0].path+'/Soma_0'
somaVm = setupTable('somaVm',moose.Compartment(soma_path),'Vm')
somaCa = setupTable('somaCa',moose.CaConc(soma_path+'/Gran_CaPool_98'),'Ca')
somaIKCa = setupTable('somaIKCa',moose.HHChannel(soma_path+'/Gran_KCa_98'),'Gk')
#KDrX = setupTable('ChanX',moose.HHChannel(soma_path+'/Gran_KDr_98'),'X')
soma = moose.Compartment(soma_path)
print "Reinit MOOSE ... "
resetSim(['/elec','/cells'],simdt,plotdt,simmethod='ee') # from moose.utils
print "Running ... "
moose.start(runtime)
# plotting
tvec = arange(0.0,runtime,plotdt)
plot(tvec,somaVm.vector[1:])
title('Soma Vm')
xlabel('time (s)')
ylabel('Voltage (V)')
figure()
plot(tvec,somaCa.vector[1:])
title('Soma Ca')
xlabel('time (s)')
ylabel('Ca conc (mol/m^3)')
figure()
plot(tvec,somaIKCa.vector[1:])
title('KCa current (A)')
xlabel('time (s)')
ylabel('')
print "Showing plots ..."
show()
filename = "GranuleCell.net.xml"
if __name__ == "__main__":
if len(sys.argv)<2:
filename = "GranuleCell.net.xml"
else:
filename = sys.argv[1]
loadGran98NeuroML_L123(filename)
| mit | -6,429,822,865,173,721,000 | 31.485294 | 124 | 0.650521 | false |
Diacamma2/asso | diacamma/asso/migrations/0002_addon.py | 1 | 1243 | # -*- coding: utf-8 -*-
'''
Initial django functions
@author: Laurent GAY
@organization: sd-libre.fr
@contact: [email protected]
@copyright: 2017 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from django.db import migrations
from django.utils.translation import ugettext_lazy as _
from lucterios.contacts.models import StructureType
def addon_values(*args):
StructureType.objects.create(name=_('family'))
class Migration(migrations.Migration):
dependencies = [
('asso', '0001_initial'),
]
operations = [
migrations.RunPython(addon_values),
]
| gpl-3.0 | 3,421,477,900,492,315,000 | 26.622222 | 68 | 0.746581 | false |
jricardo27/travelhelper | old/core/lpparser.py | 1 | 4319 | __author__ = 'ricardo'
"""
Parse Lonely Planet pages
"""
import re
import urllib2
from collections import OrderedDict
from bs4 import BeautifulSoup
def get_text(elem):
"""
Return the element's text encoded in utf-8
"""
return '\n'.join(elem.stripped_strings).encode('utf8')
def parse_sight_index(index_url):
"""
Return all the links found in the sight page
"""
index = 1
sight_urls = []
while True:
url = '{0}.json?page={1}'.format(index_url, index)
print('Downloading page {0}'.format(url))
index += 1
try:
content = urllib2.urlopen(url)
sight_urls += _parse_sight_index_page(content)
except urllib2.HTTPError:
break
return sight_urls
def _parse_sight_index_page(html):
"""
Parse a country's sights page from Lonely Planet and return
and array of urls to the information
"""
soup = BeautifulSoup(html)
content = soup.find_all('div', class_='stack__content')[0]
cols = content.find_all('div', class_='col--one-whole')
return [
col.a.get('href')
for col in cols
if col.a.get('href', False)
]
def _parse_sight_info(soup):
"""
Parse the information that appears at the right of the page
"""
dd_list = soup.find_all('dd')
info = {}
for elem in dd_list:
key = get_text(elem.find_previous('dt'))
value = get_text(elem)
if key in info:
info[key] = '{0}<br/>{1}'.format(info[key], value)
else:
info[key] = value
return info
def get_country_city(url):
"""
Parse the given url and return the country and city name
"""
regex = r'\.com/([^/]*).*?/([^/]*)$'
if 'sights' in url:
regex = regex.replace(r'$', r'/sights.*')
try:
country, city = re.findall(regex, url)[0]
except IndexError:
city = None
country = None
return country, city
def parse_sight(url):
"""
Download and parse an individual sight page
Return a dictionary
"""
print('Parsing {0}'.format(url))
country, city = get_country_city(url)
soup = BeautifulSoup(urllib2.urlopen(url).read())
sight = OrderedDict()
sight['title'] = get_text(soup.h1)
sight['url'] = url
sight['city'] = city
sight['country'] = country
attributes = (
'Prices',
'Opening hours',
'More information',
'Address',
'Getting there',
)
info = _parse_sight_info(soup)
for attr_ in attributes:
if attr_ in info:
sight[attr_] = info[attr_]
sight['description'] = get_text(
soup.find_all('div', class_='ttd__section--description')[0]
)
try:
images = soup.find_all('div', class_='tab__content')[0].find_all('img')
prefix = 'http://'
img_url = images[0].get('src')
if img_url[:len(prefix)] != prefix:
try:
img_url = images[0].get('src').split(prefix)[1]
img_url = '{0}{1}'.format(prefix, img_url)
except IndexError:
pass
if 'maps.googleapis.com' not in img_url:
sight['image_src'] = img_url
except IndexError:
pass
return sight
def parse_price(price_string):
"""
Return the result of applying a regex over the string
"""
regex_exp = {
'currencies': u'\u20AC|Dh',
'price': u'[\d\.]+',
'exclude_years': u'(?!-?\d?yr)',
'hours': u'(?!am|pm|hr|\xBD)',
}
regex = u'({currencies})?({price}){exclude_years}{hours}'.format(
**regex_exp
)
return re.findall(regex, price_string)
def parse_opening_hours(input_string):
"""
Return the result of applying a regex over the input string
9am-5pm
11am Mon-Fri
2.30pm Mon-Fri, 11am & 2.30pm Sat & Sun
9.30am-4pm Tue-Sun
9am-7pm Mon-Fri, to 4pm Sat
8pm-midnight Tue-Sun
06:00-18:00
24hr
1-6pm Tue-Sat
10.30am-7pm Tue-Sat, to 5.30pm Sun
6-10am & 4-8pm
9.30am-3.30pm Mon-Fri, to 1pm Sat & Sun
Mass 7.30pm Mon-Sat, 8am, 10am & 7.30pm Sun
10am-8pm May-Aug, shorter hours in winter
10am-1pm & 2-6pm Tue-Sun
9am-6pm Oct-Mar, 9am-7pm Apr, 9am-8pm May-Sep
closed to the public
"""
pass | bsd-3-clause | -1,066,586,597,003,418,100 | 23 | 79 | 0.561704 | false |
usccolumbia/CSCE206_Projects | WinnerTakeAllPokerGame/winnertakeall.py | 1 | 4415 | #University of South Carolina
#CSCE206 Scientific Application Programming
#Fall 2014 Final project
#Poker game
import Tkinter
from Tkinter import *
import random
def shuffledeck():
deck = []
for s in ['Clubs', 'Diamonds', 'Hearts', 'Spades']:
for n in range(2, 15):
deck.append([n, s])
random.shuffle(deck)
return deck
def cardnumber(card):
if card == 11:
return 'Jack'
elif card == 12:
return 'Queen'
elif card == 13:
return 'King'
elif card == 14:
return 'Ace'
else:
return str(card)
def deal(deck):
return deck[::2], deck[1::2]
def play(Jeffrey, siri):
if Jeffrey > siri:
return 'Jeffrey'
elif siri > Jeffrey:
return 'siri'
else:
return 'Tie'
def refill(cardswon):
random.shuffle(cardswon)
return cardswon
deck = shuffledeck()
Jeffreycards, siricards = deal(deck)
inplay = []
round = 0
Jeffreywon = []
siriwon = []
root = Tkinter.Tk()
canvas = Tkinter.Canvas(root)
canvas.grid(row = 0, column = 0)
def getImageName(card):
number=card[0]
suit=card[1]
map={"Diamonds":'d','Hearts':'h','Clubs':'c','Spades':'s'}
if number<10:
return 'Image/'+'0'+str(number)+map[suit]+'.gif'
elif number==14:
return 'Image/01' +map[suit]+'.gif'
else:
return 'Image/'+str(number)+map[suit]+'.gif'
def OnButtonClick():
global labelVariable
global Jeffreywon,siriwon,inplay,deck,round,Jeffreycards, siricards,Tkinter,root,photo1,photo
global canvas
if len(Jeffreycards) == 0 or len(siricards) == 0:
if len(Jeffreycards) > len(siricards):
labelVariable.set("Jeffrey has won the game!")
elif len(siricards) > len(Jeffreycards):
labelVariable.set("siri has won the game!")
# labelVariable.set("game over")
return
round += 1
labelVariable.set( "Time for Round %d" % round)
Jeffreycard = Jeffreycards.pop(0)
siricard = siricards.pop(0)
# print Jeffreycard, siricard
photo = Tkinter.PhotoImage(file = getImageName(Jeffreycard))
canvas.create_image(50,130, image=photo)
photo1=Tkinter.PhotoImage(file = getImageName(siricard))
canvas.create_image(200,130, image=photo1)
inplay.extend([Jeffreycard, siricard])
labelVariable.set( "Jeffrey flips the %s of %s." % (cardnumber(Jeffreycard[0]), Jeffreycard[1]))
labelVariable.set( "siri flips the %s of %s." % (cardnumber(siricard[0]), siricard[1]))
roundwinner = play(Jeffreycard[0], siricard[0])
if roundwinner == 'Jeffrey':
labelVariable1.set( "Jeffrey wins this round!")
Jeffreywon.extend(inplay)
inplay = []
elif roundwinner == 'siri':
labelVariable1.set( "siri wins this round!")
siriwon.extend(inplay)
inplay = []
elif roundwinner == 'Tie':
labelVariable1.set( "Jeffrey and siri have tied!")
labelVariable.set( " %s cards %s cards." % (len(Jeffreywon)+len(Jeffreycards), len(siriwon)+len(siricards)))
if len(Jeffreycards) == 0 and len(Jeffreywon) > 0:
Jeffreycards = refill(Jeffreywon)
Jeffreywon = []
if len(siricards) == 0 and len(siriwon) > 0:
siricards = refill(siriwon)
siriwon = []
photo = Tkinter.PhotoImage(file = 'Image/back111.gif')
canvas.create_image(50,130, image=photo)
photo1 = Tkinter.PhotoImage(file = 'Image/back111.gif')
canvas.create_image(200,130, image=photo1)
# photo1=Tkinter.PhotoImage(file = 'Image/01h.gif')
# canvas.create_image(150,100, image=photo1)
button = Tkinter.Button(root,text=u"Play another round",
command=OnButtonClick)
button.grid(column=1,row=0)
labelVariable = Tkinter.StringVar()
label = Tkinter.Label(root,textvariable=labelVariable,anchor="w",fg="black",bg="white")
label.grid(column=0,row=6,columnspan=2,sticky='EW')
labelVariable.set(u"Let's Play!")
labelVariable1 = Tkinter.StringVar()
label1 = Tkinter.Label(root,textvariable=labelVariable1,anchor="w",fg="black",bg="white")
label1.grid(column=0,row=5,columnspan=1,sticky='EW')
labelVariable1.set(u"Hello!")
labelVariable2 = Tkinter.StringVar()
label2 = Tkinter.Label(root,textvariable=labelVariable2,anchor='w',fg="black",bg="white")
label2.grid(column=0,row=1,columnspan=1,sticky='EW')
labelVariable2.set(u" Jeffrey Siri ")
root.mainloop() | mit | 7,677,825,762,820,851,000 | 26.949367 | 123 | 0.6453 | false |
mtik00/yamicache | tests/test_noclass.py | 1 | 1369 | """
Just a quick test to make sure there's no ``cls`` or ``self`` odness.
"""
from __future__ import print_function
import sys
import time
from yamicache import Cache
c = Cache(prefix="myapp", hashing=False, debug=False)
@c.cached()
def my_func(argument, power):
"""running my_func"""
return argument ** power
@c.cached()
def return_list(index):
mylists = {0: [1, 2, 3], 1: [4, 5, 6]}
return mylists[index]
def test_main():
assert len(c) == 0
for _ in [0, 1, 2, 3, 4, 5]:
my_func(2, 3)
assert len(c) == 1
def test_lists():
"""Make sure lists are returned"""
assert return_list(0) == [1, 2, 3]
assert return_list(0) == [1, 2, 3]
assert return_list(1) == [4, 5, 6]
assert return_list(1) == [4, 5, 6]
class MyObject(object):
def __init__(self, number):
self.number = number
@c.cached(timeout=1)
def return_object_list():
return [MyObject(0), MyObject(1), MyObject(2)]
def test_object_list():
"""Test a result with a timeout & objects are returned"""
result = return_object_list()
assert result[0].number == 0
assert result[1].number == 1
assert result[2].number == 2
time.sleep(2)
result = return_object_list()
assert result[0].number == 0
assert result[1].number == 1
assert result[2].number == 2
if __name__ == "__main__":
test_main()
| mit | -7,669,122,488,794,662,000 | 20.061538 | 69 | 0.594595 | false |
splice/splice-server | src/splice/entitlement/tests/pools.py | 1 | 14245 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import datetime
import json
import logging
from splice.common import utils
from splice.common.models import Pool
# Unit test imports
from base import BaseEntitlementTestCase
LOG = logging.getLogger(__name__)
class PoolTest(BaseEntitlementTestCase):
def setUp(self):
super(PoolTest, self).setUp()
def tearDown(self):
super(PoolTest, self).tearDown()
def test_date_as_string_is_converted_on_save(self):
found = Pool.objects()
self.assertEquals(len(found), 0)
datestr = "2012-12-06T11:13:06.432367"
p = Pool()
p.uuid = "a"
p.account = 1
p.active = True
p.contract = 1
p.product_id = "something"
p.product_name = "something_name"
p.product_attributes = {}
p.provided_products = []
p.created = datestr
p.start_date = datestr
p.end_date = datestr
p.updated = datestr
p.quantity = 0
p.save()
found = Pool.objects()
self.assertEquals(len(found), 1)
self.assertEquals(found[0].uuid, p.uuid)
self.assertEquals(found[0].account, p.account)
self.assertEquals(found[0].active, p.active)
self.assertEquals(found[0].contract, p.contract)
self.assertEquals(found[0].product_id, p.product_id)
self.assertEquals(found[0].product_name, p.product_name)
self.assertEquals(found[0].product_attributes, p.product_attributes)
self.assertEquals(found[0].provided_products, p.provided_products)
self.assertEquals(found[0].quantity, p.quantity)
self.assertEquals(type(found[0].created), datetime.datetime)
self.assertEquals(type(found[0].updated), datetime.datetime)
self.assertEquals(type(found[0].start_date), datetime.datetime)
self.assertEquals(type(found[0].end_date), datetime.datetime)
def test_get_pool_collection(self):
datestr = "2010-10-01T11:01:00.432367"
a = Pool(uuid="uuid a", account=1, contract=1, active=True, product_id="pid1", product_name="pname",
quantity=1, created=datestr, updated=datestr, start_date=datestr, end_date=datestr)
a.save()
b = Pool(uuid="uuid b", account=1, contract=1, active=True, product_id="pid1", product_name="pname",
quantity=1, created=datestr, updated=datestr, start_date=datestr, end_date=datestr)
b.save()
resp = self.api_client.get('/api/v1/pool/', format='json',
SSL_CLIENT_CERT=self.expected_valid_splice_server_identity_pem)
self.assertEquals(resp.status_code, 200)
def test_example_with_raw_string_data(self):
example = {"objects": [
{
"uuid": "uuid_a",
"account": "0",
"contract": "1",
"active": "True",
"product_id": "p0",
"product_name": "p0_name",
"quantity": "1",
"created": "2012-12-07T15:35:54.448000",
"updated": "2012-12-07T15:35:54.448000",
"start_date": "2012-12-07T15:35:54.448000",
"end_date": "2012-12-07T15:35:54.448000",
},
{
"uuid": "uuid_b",
"account": "0",
"contract": "1",
"active": "True",
"product_id": "p0",
"product_name": "p0_name",
"quantity": "1",
"created": "2012-12-07T15:35:54.448000",
"updated": "2012-12-07T15:35:54.448000",
"start_date": "2012-12-07T15:35:54.448000",
"end_date": "2012-12-07T15:35:54.448000",
},
]}
post_data = json.dumps(example)
LOG.info("Post to pool with data: %s" % (post_data))
resp = self.raw_api_client.post('/api/v1/pool/', format='json', data=post_data,
SSL_CLIENT_CERT=self.expected_valid_splice_server_identity_pem)
LOG.info("Response: %s" % (resp))
self.assertEquals(resp.status_code, 204)
found = Pool.objects()
self.assertEquals(len(found), 2)
self.assertIn(found[0].uuid, ("uuid_a", "uuid_b"))
def test_uploading_single_pool(self):
found = Pool.objects()
self.assertEquals(len(found), 0)
datestr = "2012-12-06T11:13:06.432367+00:00"
p = Pool()
p.uuid = "a"
p.account = 1
p.active = True
p.contract = 1
p.product_id = "something"
p.product_name = "something_name"
p.product_attributes = {}
p.provided_products = []
p.created = datestr
p.start_date = datestr
p.end_date = datestr
p.updated = datestr
p.quantity = 0
example = {"objects":[p]}
post_data = utils.obj_to_json(example)
LOG.info("Calling api for pool import with post data: '%s'" % (post_data))
resp = self.raw_api_client.post('/api/v1/pool/', format='json', data=post_data,
SSL_CLIENT_CERT=self.expected_valid_splice_server_identity_pem)
self.assertEquals(resp.status_code, 204)
# Now check that the server api saved the object as expected
found = Pool.objects()
self.assertEquals(len(found), 1)
self.assertEquals(found[0].uuid, p.uuid)
self.assertEquals(found[0].account, p.account)
self.assertEquals(found[0].active, p.active)
self.assertEquals(found[0].contract, p.contract)
self.assertEquals(found[0].product_id, p.product_id)
self.assertEquals(found[0].product_name, p.product_name)
self.assertEquals(found[0].product_attributes, p.product_attributes)
self.assertEquals(found[0].provided_products, p.provided_products)
self.assertEquals(found[0].quantity, p.quantity)
self.assertEquals(type(found[0].created), datetime.datetime)
self.assertEquals(str(found[0].created), "2012-12-06 11:13:06.432000+00:00")
self.assertEquals(type(found[0].updated), datetime.datetime)
self.assertEquals(str(found[0].updated), "2012-12-06 11:13:06.432000+00:00")
self.assertEquals(type(found[0].start_date), datetime.datetime)
self.assertEquals(str(found[0].start_date), "2012-12-06 11:13:06.432000+00:00")
self.assertEquals(type(found[0].end_date), datetime.datetime)
self.assertEquals(str(found[0].end_date), "2012-12-06 11:13:06.432000+00:00")
def test_uploading_duplicate(self):
#
# Similar to test_uploading_single_pool, except for this test we will save the Pool object we create
# then upload the same exact data and verify we have only 1 record in the DB...no duplicate should be present.
#
found = Pool.objects()
self.assertEquals(len(found), 0)
datestr = "2012-12-06T11:13:06.432367"
p = Pool()
p.uuid = "a"
p.account = 1
p.active = True
p.contract = 1
p.product_id = "something"
p.product_name = "something_name"
p.product_attributes = {}
p.provided_products = []
p.created = datestr
p.start_date = datestr
p.end_date = datestr
p.updated = datestr
p.quantity = 0
p.save()
self.assertEquals(len(found), 1)
example = {"objects":[p]}
post_data = utils.obj_to_json(example)
LOG.info("Calling api for pool import with post data: '%s'" % (post_data))
resp = self.raw_api_client.post('/api/v1/pool/', format='json', data=post_data,
SSL_CLIENT_CERT=self.expected_valid_splice_server_identity_pem)
self.assertEquals(resp.status_code, 204)
# Now check that the server api saved the object as expected
found = Pool.objects()
self.assertEquals(len(found), 1)
self.assertEquals(found[0].uuid, p.uuid)
def test_upload_older_pool(self):
found = Pool.objects()
self.assertEquals(len(found), 0)
# Create 'newer' pool and save to DB
datestr = "2012-12-06T11:13:06.432367"
newer = Pool()
newer.uuid = "a"
newer.account = 1
newer.active = True
newer.contract = 1
newer.product_id = "something"
newer.product_name = "something_name"
newer.product_attributes = {}
newer.provided_products = []
newer.created = datestr
newer.start_date = datestr
newer.end_date = datestr
newer.updated = datestr
newer.quantity = 0
newer.save()
found = Pool.objects()
self.assertEquals(len(found), 1)
# Create 'older' which is one month older than newer
older = Pool()
older.uuid = newer.uuid
older.account = 20
older.contract = 400
older.active = False
older.product_id = "something older"
older.product_name = "something older name"
older.product_attributes = {}
older.provided_products = []
older.created = newer.created
older.updated = "2012-11-06T11:13:06.432367" # 1 month older
older.start_date = older.updated
older.end_date = older.updated
older.quantity = 1
example = {"objects": [older._data]}
post_data = utils.obj_to_json(example)
LOG.info("Calling api for pool import with post data: '%s'" % (post_data))
resp = self.raw_api_client.post('/api/v1/pool/', format='json', data=post_data,
SSL_CLIENT_CERT=self.expected_valid_splice_server_identity_pem)
LOG.info("Status Code: %s, Response: %s" % (resp.status_code, resp))
self.assertEquals(resp.status_code, 204)
# Now check that the server api kept the 'newer' as is and ignored the older
found = Pool.objects()
self.assertEquals(len(found), 1)
self.assertEquals(found[0].uuid, newer.uuid)
self.assertEquals(found[0].active, newer.active)
self.assertEquals(found[0].account, newer.account)
self.assertEquals(found[0].contract, newer.contract)
self.assertEquals(found[0].quantity, newer.quantity)
self.assertEquals(found[0].product_id, newer.product_id)
self.assertEquals(found[0].product_name, newer.product_name)
self.assertEquals(found[0].product_attributes, newer.product_attributes)
self.assertEquals(found[0].provided_products, newer.provided_products)
self.assertEquals(str(found[0].created), "2012-12-06 11:13:06.432000+00:00")
self.assertEquals(str(found[0].updated), "2012-12-06 11:13:06.432000+00:00")
self.assertEquals(str(found[0].start_date), "2012-12-06 11:13:06.432000+00:00")
self.assertEquals(str(found[0].end_date), "2012-12-06 11:13:06.432000+00:00")
def test_upload_newer_spliceserver(self):
found = Pool.objects()
self.assertEquals(len(found), 0)
# Create 'older' pool and save to DB
older = Pool()
older.uuid = "a"
older.account = 20
older.contract = 400
older.active = False
older.product_id = "something older"
older.product_name = "something older name"
older.product_attributes = {}
older.provided_products = []
older.created = "2012-11-06T11:13:06.432367+00:00"
older.updated = "2012-11-06T11:13:06.432367+00:00" # 1 month older
older.start_date = older.updated
older.end_date = older.updated
older.quantity = 1
older.save()
found = Pool.objects()
self.assertEquals(len(found), 1)
datestr = "2012-12-06T11:13:06.432367+00:00"
newer = Pool()
newer.uuid = older.uuid
newer.account = 1
newer.active = True
newer.contract = 1
newer.product_id = "something"
newer.product_name = "something_name"
newer.product_attributes = {}
newer.provided_products = []
newer.created = datestr
newer.start_date = datestr
newer.end_date = datestr
newer.updated = datestr
newer.quantity = 0
example = {"objects": [newer]}
post_data = utils.obj_to_json(example)
LOG.info("Calling api for pool with post data: '%s'" % (post_data))
resp = self.raw_api_client.post('/api/v1/pool/', format='json', data=post_data,
SSL_CLIENT_CERT=self.expected_valid_splice_server_identity_pem)
LOG.info("Status Code: %s, Response: %s" % (resp.status_code, resp))
self.assertEquals(resp.status_code, 204)
# Now check that the server api kept the 'newer' as is and ignored the older
found = Pool.objects()
self.assertEquals(len(found), 1)
self.assertEquals(found[0].uuid, newer.uuid)
self.assertEquals(found[0].active, newer.active)
self.assertEquals(found[0].account, newer.account)
self.assertEquals(found[0].contract, newer.contract)
self.assertEquals(found[0].quantity, newer.quantity)
self.assertEquals(found[0].product_id, newer.product_id)
self.assertEquals(found[0].product_name, newer.product_name)
self.assertEquals(found[0].product_attributes, newer.product_attributes)
self.assertEquals(found[0].provided_products, newer.provided_products)
self.assertEquals(str(found[0].created), "2012-12-06 11:13:06.432000+00:00")
self.assertEquals(str(found[0].updated), "2012-12-06 11:13:06.432000+00:00")
self.assertEquals(str(found[0].start_date), "2012-12-06 11:13:06.432000+00:00")
self.assertEquals(str(found[0].end_date), "2012-12-06 11:13:06.432000+00:00")
| gpl-2.0 | -1,445,925,718,024,497,400 | 41.519403 | 118 | 0.607484 | false |
bretthandrews/marvin | python/marvin/tools/spectrum.py | 1 | 7931 | #!/usr/bin/env python
# encoding: utf-8
#
# spectrum.py
#
# Licensed under a 3-clause BSD license.
# Revision history:
# 13 Apr 2016 J. Sánchez-Gallego
# Initial version
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import matplotlib.pyplot as plt
class Spectrum(object):
"""A class representing an spectrum with extra functionality.
Parameters:
flux (array-like):
The 1-D array contianing the spectrum.
units (str, optional):
The units of the flux spectrum.
wavelength (array-like, optional):
The wavelength solution for ``spectrum``. Must have the same number
of elements.
ivar (array-like, optional):
The inverse variance array for ``spectrum``. Must have the same
number of elements.
mask (array-like, optional):
The mask array for ``spectrum``. Must have the same number of
elements.
wavelength_unit (str, optional):
The units of the wavelength solution.
"""
def __init__(self, flux, units=None, wavelength_unit=None,
ivar=None, mask=None, wavelength=None):
self.flux = np.array(flux)
self.ivar = np.array(ivar) if ivar is not None else None
self.mask = np.array(mask) if mask is not None else None
self.wavelength = np.array(wavelength) if wavelength is not None else None
self.units = units
self.wavelength_unit = wavelength_unit
# Performs some checks.
assert len(self.flux.shape) == 1, 'spectrum must be 1-D'
if self.ivar is not None:
assert len(self.ivar.shape) == 1, 'ivar must be 1-D'
assert len(self.flux) == len(self.ivar), \
'ivar must have the same lenght as the base spectrum'
if self.mask is not None:
assert len(self.mask.shape) == 1, 'mask must be 1-D'
assert len(self.flux) == len(self.mask), \
'mask must have the same lenght as the base spectrum'
if self.wavelength is not None:
assert len(self.wavelength.shape) == 1, 'wavelength must be 1-D'
assert len(self.flux) == len(self.wavelength), \
'wavelength must have the same lenght as the base spectrum'
def __repr__(self):
"""Representation for Spectrum."""
return '<Marvin Spectrum ({0!s})'.format(self.flux)
def plot(self, array='flux', xlim=None, ylim=(0, None), mask_color=None,
xlabel=None, ylabel=None, figure=None, return_figure=False, **kwargs):
"""Plots a spectrum using matplotlib.
Returns a |axes|_ object with a representation of this spectrum.
The returned ``axes`` object can then be showed, modified, or saved to
a file. If running Marvin from an iPython console and
`matplotlib.pyplot.ion()
<http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.ion>`_,
the plot will be displayed interactivelly.
Parameters:
array ({'flux', 'ivar', 'mask'}):
The array to display, defaults to the internal spectrum with
which the object was initialised.
xlim,ylim (tuple-like or None):
The range to display for the x- and y-axis, respectively,
defined as a tuple of two elements ``[xmin, xmax]``. If
the range is ``None``, the range for the axis will be set
automatically by matploltib. If ``Spectrum.wavelength`` is
defined, the range in the x-axis must be defined as a
wavelength range. Default for ylim is (0, None), which cuts
off negative values but lets the maximum float.
xlabel,ylabel (str or None):
The axis labels to be passed to the plot. If ``xlabel=None``
and ``Spectrum.wavelength_unit`` is defined, those units will
be used, after being properly formatted for Latex display.
If ``ylabel=None``, the y-axis label will be automatically
defined base on the type of input array.
mask_color (matplotlib valid color or None):
If set and ``Spectrum.mask`` is defined, the elements of
``array`` with ``mask`` will be coloured using that value.
More information about `matplotlib colours
<http://matplotlib.org/api/colors_api.html>`_.
figure (matplotlib Figure object or None):
The matplotlib figure object from which the axes must be
created. If ``figure=None``, a new figure will be created.
return_figure (bool):
If ``True``, the matplotlib Figure object used will be returned
along with the axes object.
kwargs (dict):
Any other keyword argument that will be passed to
`matplotlib.pyplot.plot
<http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot>`_.
Returns:
ax:
The `matplotlib.axes <http://matplotlib.org/api/axes_api.html>`_
object containing the plot representing the spectrum. If
``return_figure=True``, a tuple will be returned of the form
``(ax, fig)``.
Example:
>>> spectrum = Spectrum(np.arange(100), wavelength=np.arange(100)*0.1)
>>> ax = spectrum.plot(xrange=[5, 7])
>>> ax.show()
We can change the range of the axes after the object has been created.
>>> ax.set_xlim(3, 8)
>>> ax.show()
.. |axes| replace:: matplotlib.axes
.. _axes: http://matplotlib.org/api/axes_api.html
"""
array = array.lower()
validSpectrum = ['flux', 'ivar', 'mask']
assert array in validSpectrum, 'array must be one of {0!r}'.format(validSpectrum)
if array == 'flux':
data = self.flux
elif array == 'ivar':
data = self.ivar
elif array == 'mask':
data = self.mask
xaxis = self.wavelength if self.wavelength is not None else np.arange(len(self))
fig = plt.figure() if figure is None else figure
ax = fig.add_subplot(111)
ax.plot(xaxis, data, **kwargs)
# This does not work very well for small ranges of masked elements.
# Probably requires some rethinking.
if mask_color is not None:
mask_indices = np.where(self.mask > 0)
kwargs['color'] = mask_color
ax.plot(xaxis[mask_indices], data[mask_indices], **kwargs)
if xlim is not None:
assert len(xlim) == 2
ax.set_xlim(*xlim)
if ylim is not None:
assert len(ylim) == 2
ax.set_ylim(*ylim)
if xlabel is None:
if self.wavelength is not None:
xlabel = 'Wavelength'
if self.wavelength_unit == 'Angstrom':
xlabel += r' $[\rm\AA]$'
elif self.wavelength_unit is not None:
xlabel += r' [{0}]'.format(self.wavelength_unit)
else:
xlabel = ''
if ylabel is None:
if array == 'flux':
ylabel = 'Flux'
if self.units == '1e-17 erg/s/cm^2/Ang/spaxel':
ylabel += r' $[\rm 10^{-17}\,erg\,s^{-1}\,cm^{-2}\,\AA^{-1}\,spaxel^{-1}]$'
elif self.units is not None:
ylabel += r' [{0}]'.format(self.units)
elif array == 'ivar':
ylabel = 'Inverse variance'
else:
ylabel = ''
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if return_figure:
return (ax, fig)
else:
return ax
| bsd-3-clause | -7,553,470,793,640,075,000 | 37.495146 | 95 | 0.562926 | false |
monome/ansible | tools/flash_tools/commands/extract/extract_presets.py | 1 | 1236 | import argparse
import json
from commands.extract.extractor import PresetExtractor
def extract(args):
extractor = PresetExtractor(args.firmware, args.version, args.hexfile)
presets, image = extractor.extract()
with open(args.out, 'w') as outf:
outf.write(json.dumps(
presets,
indent=4 if args.pretty else None,
))
print('{} preset written to {}'.format(extractor.target_version, args.out))
def command(parser):
parser.add_argument(
'hexfile',
type=str,
help='name of the hex dump file to inspect'
)
parser.add_argument(
'--version',
type=str,
help='firmware version of the ansible which saved the preset',
default='3.0.0'
)
parser.add_argument(
'--target_version',
type=str,
help='firmware version to target with the JSON output'
)
parser.add_argument(
'--out',
type=str,
help='JSON file to write the preset to',
default='ansible-preset.json'
)
parser.add_argument(
'--pretty',
action='store_true',
help='pretty-print the JSON output',
default=False,
)
parser.set_defaults(func=extract)
| gpl-2.0 | -2,786,293,025,625,807,400 | 25.297872 | 79 | 0.599515 | false |
hesamd/hazm | hazm/Lemmatizer.py | 1 | 6668 | # coding: utf-8
from __future__ import unicode_literals
import codecs
from .utils import default_words, default_verbs
from .Stemmer import Stemmer
from .WordTokenizer import WordTokenizer
class Lemmatizer(object):
"""
>>> lemmatizer = Lemmatizer()
>>> lemmatizer.lemmatize('کتابها')
'کتاب'
>>> lemmatizer.lemmatize('آتشفشان')
'آتشفشان'
>>> lemmatizer.lemmatize('میروم')
'رفت#رو'
>>> lemmatizer.lemmatize('گفته_شده_است')
'گفت#گو'
>>> lemmatizer.lemmatize('نچشیده_است')
'چشید#چش'
>>> lemmatizer.lemmatize('مردم', pos='N')
'مردم'
>>> lemmatizer.lemmatize('اجتماعی', pos='AJ')
'اجتماعی'
"""
def __init__(self, words_file=default_words, verbs_file=default_verbs, joined_verb_parts=True):
self.verbs = {}
self.words = set([])
self.stemmer = Stemmer()
if words_file:
with codecs.open(words_file, encoding='utf8') as words_file:
self.words = set(map(lambda w: w.strip(), words_file))
if verbs_file:
tokenizer = WordTokenizer(verbs_file=verbs_file)
self.verbs['است'] = '#است'
for verb in tokenizer.verbs:
for tense in self.conjugations(verb):
self.verbs[tense] = verb
if joined_verb_parts:
for verb in tokenizer.verbs:
bon = verb.split('#')[0]
for after_verb in tokenizer.after_verbs:
self.verbs[bon + 'ه_' + after_verb] = verb
self.verbs['ن' + bon + 'ه_' + after_verb] = verb
for before_verb in tokenizer.before_verbs:
self.verbs[before_verb + '_' + bon] = verb
def lemmatize(self, word, pos=''):
if not pos and word in self.words:
return word
if (not pos or pos == 'V') and word in self.verbs:
return self.verbs[word]
if pos.startswith('AJ') and word[-1] == 'ی':
return word
if pos == 'PRO':
return word
if word in self.words:
return word
stem = self.stemmer.stem(word)
if stem and stem in self.words:
return stem
return word
def conjugations(self, verb):
"""
>>> lemmatizer = Lemmatizer()
>>> lemmatizer.conjugations('خورد#خور')
['خوردم', 'خوردی', 'خورد', 'خوردیم', 'خوردید', 'خوردند', 'نخوردم', 'نخوردی', 'نخورد', 'نخوردیم', 'نخوردید', 'نخوردند', 'خورم', 'خوری', 'خورد', 'خوریم', 'خورید', 'خورند', 'نخورم', 'نخوری', 'نخورد', 'نخوریم', 'نخورید', 'نخورند', 'میخوردم', 'میخوردی', 'میخورد', 'میخوردیم', 'میخوردید', 'میخوردند', 'نمیخوردم', 'نمیخوردی', 'نمیخورد', 'نمیخوردیم', 'نمیخوردید', 'نمیخوردند', 'خوردهام', 'خوردهای', 'خورده', 'خوردهایم', 'خوردهاید', 'خوردهاند', 'نخوردهام', 'نخوردهای', 'نخورده', 'نخوردهایم', 'نخوردهاید', 'نخوردهاند', 'خورم', 'خوری', 'خورد', 'خوریم', 'خورید', 'خورند', 'نخورم', 'نخوری', 'نخورد', 'نخوریم', 'نخورید', 'نخورند', 'میخورم', 'میخوری', 'میخورد', 'میخوریم', 'میخورید', 'میخورند', 'نمیخورم', 'نمیخوری', 'نمیخورد', 'نمیخوریم', 'نمیخورید', 'نمیخورند', 'بخورم', 'بخوری', 'بخورد', 'بخوریم', 'بخورید', 'بخورند', 'نخورم', 'نخوری', 'نخورد', 'نخوریم', 'نخورید', 'نخورند', 'بخور', 'نخور']
>>> lemmatizer.conjugations('آورد#آور')
['آوردم', 'آوردی', 'آورد', 'آوردیم', 'آوردید', 'آوردند', 'نیاوردم', 'نیاوردی', 'نیاورد', 'نیاوردیم', 'نیاوردید', 'نیاوردند', 'آورم', 'آوری', 'آورد', 'آوریم', 'آورید', 'آورند', 'نیاورم', 'نیاوری', 'نیاورد', 'نیاوریم', 'نیاورید', 'نیاورند', 'میآوردم', 'میآوردی', 'میآورد', 'میآوردیم', 'میآوردید', 'میآوردند', 'نمیآوردم', 'نمیآوردی', 'نمیآورد', 'نمیآوردیم', 'نمیآوردید', 'نمیآوردند', 'آوردهام', 'آوردهای', 'آورده', 'آوردهایم', 'آوردهاید', 'آوردهاند', 'نیاوردهام', 'نیاوردهای', 'نیاورده', 'نیاوردهایم', 'نیاوردهاید', 'نیاوردهاند', 'آورم', 'آوری', 'آورد', 'آوریم', 'آورید', 'آورند', 'نیاورم', 'نیاوری', 'نیاورد', 'نیاوریم', 'نیاورید', 'نیاورند', 'میآورم', 'میآوری', 'میآورد', 'میآوریم', 'میآورید', 'میآورند', 'نمیآورم', 'نمیآوری', 'نمیآورد', 'نمیآوریم', 'نمیآورید', 'نمیآورند', 'بیاورم', 'بیاوری', 'بیاورد', 'بیاوریم', 'بیاورید', 'بیاورند', 'نیاورم', 'نیاوری', 'نیاورد', 'نیاوریم', 'نیاورید', 'نیاورند', 'بیاور', 'نیاور']
"""
past, present = verb.split('#')
ends = ['م', 'ی', '', 'یم', 'ید', 'ند']
if verb == '#هست':
return ['هست' + end for end in ends] + ['نیست' + end for end in ends]
past_simples = [past + end for end in ends]
past_imperfects = ['می' + item for item in past_simples]
ends = ['هام', 'های', 'ه', 'هایم', 'هاید', 'هاند']
past_narratives = [past + end for end in ends]
imperatives = ['ب' + present, 'ن' + present]
if present.endswith('ا') or present in ('آ', 'گو'):
present = present + 'ی'
ends = ['م', 'ی', 'د', 'یم', 'ید', 'ند']
present_simples = [present + end for end in ends]
present_imperfects = ['می' + item for item in present_simples]
present_subjunctives = [item if item.startswith('ب') else 'ب' + item for item in present_simples]
present_not_subjunctives = ['ن' + item for item in present_simples]
with_nots = lambda items: items + list(map(lambda item: 'ن' + item, items))
aa_refinement = lambda items: list(map(lambda item: item.replace('بآ', 'بیا').replace('نآ', 'نیا'), items)) if items[0].startswith('آ') else items
return aa_refinement(with_nots(past_simples) + with_nots(present_simples) + with_nots(past_imperfects) + with_nots(past_narratives) + with_nots(present_simples) + with_nots(present_imperfects) + present_subjunctives + present_not_subjunctives + imperatives)
| mit | -5,352,278,371,199,366,000 | 47.388889 | 952 | 0.628205 | false |
rsnakamura/oldape | apetools/devices/linuxdevice.py | 1 | 3377 |
from apetools.commands.ifconfig import IfconfigCommand
from apetools.commands.iwconfig import Iwconfig
from apetools.commons.enumerations import OperatingSystem
from apetools.devices.basedevice import BaseDevice
class LinuxDevice(BaseDevice):
"""
A class to configure and query linux devices
"""
def __init__(self, *args, **kwargs):
"""
:param:
- `connection`: a connection to the device
- `interface`: the name of the test interface (to get the ip address)
"""
super(LinuxDevice, self).__init__(*args, **kwargs)
self._ifconfig = None
self._wifi_query = None
return
@property
def ifconfig(self):
"""
:return: ifconfig command
"""
if self._ifconfig is None:
self._ifconfig = IfconfigCommand(connection=self.connection,
interface = self.interface,
operating_system=OperatingSystem.linux)
return self._ifconfig
@property
def wifi_query(self):
"""
:return: wifi_query command (Iwconfig command object)
"""
if self._wifi_query is None:
self._wifi_query = Iwconfig(connection=self.connection,
interface=self.interface)
return self._wifi_query
@property
def address(self):
"""
:return: the IP address of the device (using ifconfig)
"""
if self._address is None:
return self.ifconfig.ip_address
return self._address
@property
def mac_address(self):
"""
:return: the MAC address of the device (using ifconfig)
"""
return self.ifconfig.mac_address
@property
def bssid(self):
"""
:return: AP MAC address (using iwconfig)
"""
return self.wifi_query.bssid
@property
def ssid(self):
"""
:return: AP SSID (using iwconfig)
"""
return self.wifi_query.ssid
@property
def noise(self):
"""
:return: AP link noise (using iwconfig)
"""
return self.wifi_query.noise
@property
def channel(self):
"""
Not implemented
"""
self.logger.warning('channel query not implemented')
return "NA"
@property
def rssi(self):
"""
:return: rssi from the wifi_query
"""
return self.wifi_query.rssi
@property
def bitrate(self):
"""
:return: bitrate from the wifi_query
"""
return self.wifi_query.bitrate
def disable_wifi(self):
"""
runs 'rfkill block wifi' command
"""
self.connection.rfkill("block wifi")
return
def enable_wifi(self):
"""
run 'rfkill unblock wifi' command
"""
self.connection.rfkill("unblock wifi")
return
def log(self, message):
"""
Sends the message to the syslog
:param:
- `message`: a string to send to the syslog
:postcondition: message sent to the syslog
"""
# This uses the call interface because the connection has its own logger property
self.connection('logger', message)
return
# end class LinuxDevice
| apache-2.0 | -4,505,382,290,401,172,000 | 24.778626 | 90 | 0.551081 | false |
speignier/suppositoire | consistency_check.py | 1 | 3723 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 22:31:53 2015
@author: Ilya
"""
# %% prepare
import pandas as pd
from datetime import datetime
import calendar
import re
import numpy as np
dtt = lambda datetime_str: datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
df_train = pd.read_csv('../data/train.csv')
# %% consistency check
# Time:
dofw_c = map(lambda s: calendar.day_name[dtt(s).weekday()], df_train['Dates'].values)
print 'Days of week and dates are '+['INCONSISTENT','CONSISTENT'][int( (df_train['DayOfWeek']==dofw_c).all() )]
# Space:
# Split Address:
clean_street = lambda street: re.search('[0-9A-Z-]+ *[A-Z]{0,2}$',street.replace('/','').strip()).group(0)
streets_cleaned_splited = df_train['Address'].apply(lambda x: map(clean_street, x.split(' / ')))
df1=pd.concat([streets_cleaned_splited, df_train[['X','Y']]], axis=1)
# Split streets so to have pairs 1 Street - 1 XY:
df_streets_XY = pd.DataFrame.from_records([[lx]+list(l[1:]) for l in df1.values for lx in l[0]], columns=['Street', 'X', 'Y'])
# Compute quantiles to filter XY:
quantiles_to_compute = [0.05, 0.95]
quants = df_streets_XY.groupby('Street').quantile(quantiles_to_compute+[0.5]).rename(index=dict(zip(quantiles_to_compute, ['min','max'])))
#quants = pd.concat([quants, pd.concat([df_streets_XY.groupby('Street').mean()], keys=['mean']).swaplevel(0,1)], verify_integrity=True)
# widen borders
qut=quants.unstack().T
eps=0.5
ksi=2.0 # if 3 then 'bad' points dissappear from 'good's' area
qut=qut.T.swaplevel(0,1, axis=1)
qut['min'] = qut['min'] - eps*(qut[0.5]-qut['min']) # encircle streets locally
qut['max'] = qut['max'] - eps*(qut[0.5]-qut['max']) # encircle streets locally
qut['min'] -= ksi*df_streets_XY[['Y','X']].std() # encircle streets with the std of all points
qut['max'] += ksi*df_streets_XY[['Y','X']].std() # encircle streets with the std of all points
#qut['min'] = qut['min'] - eps*(qut['mean']-qut['min'])
#qut['max'] = qut['max'] - eps*(qut['mean']-qut['max'])
qut = qut[qut.columns[qut.columns.get_level_values(0)!=0.5]]
#qut = qut[qut.columns[qut.columns.get_level_values(0)!='mean']]
qut=qut.swaplevel(0,1, axis=1).T
# convert to tuples:
streets_cleaned_splited=pd.Series(map(tuple,streets_cleaned_splited), index=streets_cleaned_splited.index)
# remove dupes:
list_of_unique_streets=map(list, streets_cleaned_splited.unique())
# find bounds for X and Y
bnds = map(lambda x: qut[x].mean(axis=1).swaplevel(0,1), list_of_unique_streets)
dft=pd.concat(bnds, keys=streets_cleaned_splited.unique(), axis=1)
df_tuple_streets_XY = df_train[['X','Y']].set_index(streets_cleaned_splited) # similar ro df1
df_bounds_tuple_streets_XY = dft[streets_cleaned_splited].T
bool_idx=((df_bounds_tuple_streets_XY['min']<df_tuple_streets_XY) & (df_bounds_tuple_streets_XY['max']>df_tuple_streets_XY)).all(axis=1).values
df_train_good = df_train[bool_idx]
df_train_badgly = df_train[np.logical_not(bool_idx)]
# %% The spatial visualization of the result
import matplotlib.pylab as plt
print 'The good:'
print df_train_good.head()
ax_good = df_train_good.plot(kind='scatter',x='Y',y='X', title='The good', alpha=0.5)
print 'The bad and the ugly:'
print df_train_badgly.head()
ax = df_train_badgly.plot(kind='scatter',x='Y',y='X', title='The bad', alpha=0.5)
ax.set_xlim(ax_good.get_xlim())
ax.set_ylim(ax_good.get_ylim())
ax_bagly = df_train_badgly.plot(kind='scatter',x='Y',y='X', title='The ugly', alpha=0.5)
# Error ellipse
from matplotlib.patches import Ellipse
mn = tuple(df_streets_XY[['Y','X']].mean().values)
ksi_sd = tuple(ksi*df_streets_XY[['Y','X']].std().values)
elg = lambda : Ellipse(xy=mn, alpha=0.3, color='#aa7722', **dict(zip(['width','height'],ksi_sd)))
ax.add_patch(elg())
ax_bagly.add_patch(elg())
| gpl-2.0 | 5,107,858,348,160,119,000 | 41.306818 | 143 | 0.678754 | false |
mknecht/blohg | blohg/tests/rst_parser/directives.py | 1 | 14635 | # -*- coding: utf-8 -*-
"""
blohg.tests.rst_parser.directives
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Module with tests for the custom blohg reStructuredText directives.
:copyright: (c) 2010-2013 by Rafael Goncalves Martins
:license: GPL-2, see LICENSE for more details.
"""
import mock
import unittest
from docutils.parsers.rst.directives import _directives, register_directive
from blohg.rst_parser import parser
from blohg.rst_parser.directives import Vimeo, Youtube, SourceCode, Math, \
AttachmentImage, AttachmentFigure, SubPages, IncludeHg
class DirectiveTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.name = 'blohg-%s' % self.directive.__name__.lower()
register_directive(self.name, self.directive)
def tearDown(self):
# FIXME: do not use the internal list directly (?)
del _directives[self.name]
unittest.TestCase.tearDown(self)
class VimeoTestCase(DirectiveTestCase):
directive = Vimeo
def test_run_with_default_opts(self):
content = parser('''\
asd
---
.. blohg-vimeo:: 34368016
''', 3)
self.assertIn('http://player.vimeo.com/video/34368016',
content['fragment'])
self.assertIn('width: 425px', content['fragment'])
self.assertIn('height: 344px', content['fragment'])
self.assertIn('align-center', content['fragment'])
self.assertIn('frameborder="0"', content['fragment'])
self.assertIn('allowfullscreen', content['fragment'])
def test_run(self):
content = parser('''\
asd
---
.. blohg-vimeo:: 34368016
:align: left
:width: 100
:height: 200
:allowfullscreen: false
:border: 1
''', 3)
self.assertIn('http://player.vimeo.com/video/34368016',
content['fragment'])
self.assertIn('width: 100px', content['fragment'])
self.assertIn('height: 200px', content['fragment'])
self.assertIn('align-left', content['fragment'])
self.assertIn('frameborder="1"', content['fragment'])
self.assertNotIn('allowfullscreen', content['fragment'])
class YoutubeTestCase(DirectiveTestCase):
directive = Youtube
def test_run_with_default_opts(self):
content = parser('''\
asd
---
.. blohg-youtube:: ssMfQ9ybTEc
''', 3)
self.assertIn('http://www.youtube.com/embed/ssMfQ9ybTEc',
content['fragment'])
self.assertIn('width: 425px', content['fragment'])
self.assertIn('height: 344px', content['fragment'])
self.assertIn('align-center', content['fragment'])
self.assertIn('frameborder="0"', content['fragment'])
self.assertIn('allowfullscreen', content['fragment'])
def test_run(self):
content = parser('''\
asd
---
.. blohg-youtube:: ssMfQ9ybTEc
:align: left
:width: 100
:height: 200
:allowfullscreen: false
:border: 1
''', 3)
self.assertIn('http://www.youtube.com/embed/ssMfQ9ybTEc',
content['fragment'])
self.assertIn('width: 100px', content['fragment'])
self.assertIn('height: 200px', content['fragment'])
self.assertIn('align-left', content['fragment'])
self.assertIn('frameborder="1"', content['fragment'])
self.assertNotIn('allowfullscreen', content['fragment'])
class SourceCodeTestCase(DirectiveTestCase):
directive = SourceCode
def test_run_with_default_opts(self):
content = parser('''\
asd
---
.. blohg-sourcecode:: python
import os
print os.path
''', 3)
self.assertIn('class="highlight"', content['fragment'])
self.assertIn('import', content['fragment'])
def test_run(self):
content = parser('''\
asd
---
.. blohg-sourcecode:: python
:linenos:
import os
print os.path
''', 3)
self.assertIn('class="lineno">3', content['fragment'])
self.assertIn('import', content['fragment'])
class MathTestCase(DirectiveTestCase):
directive = Math
def test_run_with_default_opts(self):
content = parser('''\
asd
---
.. blohg-math::
\\frac{x^2}{1+x}
''', 3)
self.assertIn('https://chart.googleapis.com/chart?cht=tx&chl='
'%5Cfrac%7Bx%5E2%7D%7B1%2Bx%7D', content['images'])
self.assertIn('https://chart.googleapis.com/chart?cht=tx&'
'chl=%5Cfrac%7Bx%5E2%7D%7B1%2Bx%7D', content['fragment'])
self.assertIn('align-center', content['fragment'])
def test_run(self):
content = parser('''\
asd
---
.. blohg-math::
:align: left
\\frac{x^2}{1+x}
''', 3)
self.assertIn('https://chart.googleapis.com/chart?cht=tx&chl='
'%5Cfrac%7Bx%5E2%7D%7B1%2Bx%7D', content['images'])
self.assertIn('https://chart.googleapis.com/chart?cht=tx&'
'chl=%5Cfrac%7Bx%5E2%7D%7B1%2Bx%7D', content['fragment'])
self.assertIn('align-left', content['fragment'])
class AttachmentImageTestCase(DirectiveTestCase):
directive = AttachmentImage
def setUp(self):
DirectiveTestCase.setUp(self)
self._current_app = mock.patch('blohg.rst_parser.directives.current_app')
self.current_app = self._current_app.start()
self.current_app.config = {'ATTACHMENT_DIR': 'content/att'}
self.current_app.blohg.changectx.files = ['content/att/foo.jpg']
self._url_for = mock.patch('blohg.rst_parser.directives.url_for')
self.url_for = self._url_for.start()
self.url_for.return_value = 'http://lol/foo.jpg'
def tearDown(self):
del self.url_for
del self.current_app
self._url_for.stop()
self._current_app.stop()
DirectiveTestCase.tearDown(self)
def test_run_with_default_opts(self):
content = parser('''\
asd
---
.. blohg-attachmentimage:: foo.jpg
''', 3)
self.url_for.assert_called_once_with('attachments', filename='foo.jpg',
_external=True)
self.assertIn('http://lol/foo.jpg', content['images'])
self.assertIn('src="http://lol/foo.jpg"', content['fragment'])
def test_run(self):
content = parser('''\
asd
---
.. blohg-attachmentimage:: foo.jpg
:align: left
''', 3)
self.url_for.assert_called_once_with('attachments', filename='foo.jpg',
_external=True)
self.assertIn('http://lol/foo.jpg', content['images'])
self.assertIn('src="http://lol/foo.jpg"', content['fragment'])
self.assertIn('align-left', content['fragment'])
class AttachmentFigureTestCase(DirectiveTestCase):
directive = AttachmentFigure
def setUp(self):
DirectiveTestCase.setUp(self)
self._current_app = mock.patch('blohg.rst_parser.directives.current_app')
self.current_app = self._current_app.start()
self.current_app.config = {'ATTACHMENT_DIR': 'content/att'}
self.current_app.blohg.changectx.files = ['content/att/foo.jpg']
self._url_for = mock.patch('blohg.rst_parser.directives.url_for')
self.url_for = self._url_for.start()
self.url_for.return_value = 'http://lol/foo.jpg'
def tearDown(self):
del self.url_for
del self.current_app
self._url_for.stop()
self._current_app.stop()
DirectiveTestCase.tearDown(self)
def test_run_with_default_opts(self):
content = parser('''\
asd
---
.. blohg-attachmentfigure:: foo.jpg
asdf lol.
''', 3)
self.url_for.assert_called_once_with('attachments', filename='foo.jpg',
_external=True)
self.assertIn('http://lol/foo.jpg', content['images'])
self.assertIn('src="http://lol/foo.jpg"', content['fragment'])
self.assertIn('caption">asdf lol.', content['fragment'])
def test_run(self):
content = parser('''\
asd
---
.. blohg-attachmentfigure:: foo.jpg
:align: left
asdf lol.
''', 3)
self.url_for.assert_called_once_with('attachments', filename='foo.jpg',
_external=True)
self.assertIn('http://lol/foo.jpg', content['images'])
self.assertIn('src="http://lol/foo.jpg"', content['fragment'])
self.assertIn('caption">asdf lol.', content['fragment'])
self.assertIn('align-left', content['fragment'])
class SubPagesTestCase(DirectiveTestCase):
directive = SubPages
def setUp(self):
DirectiveTestCase.setUp(self)
self._current_app = mock.patch('blohg.rst_parser.directives.current_app')
self.current_app = self._current_app.start()
self.current_app.config = {'CONTENT_DIR': 'cont', 'POST_EXT': '.rs'}
# FIXME: find a way to test sorting
m1 = mock.Mock(slug='foo', title='Foo :)')
m2 = mock.Mock(slug='bar', title='Bar!')
m3 = mock.Mock(slug='foo/bar', title='Foo Bar :P')
m4 = mock.Mock(slug='foo/bar/baz', title='Foo Bar Baz XD')
m5 = mock.Mock(slug='foo/bar/bad', title='Foo Bar Bad XD')
m1.get.side_effect = lambda x, y: y
m2.get.side_effect = lambda x, y: y
m3.get.side_effect = lambda x, y: y
m4.get.side_effect = lambda x, y: y
m5.get.side_effect = lambda x, y: '###asd###'
self.current_app.blohg.content.get_all.return_value = [m1, m2, m3, m4, m5]
self._url_for = mock.patch('blohg.rst_parser.directives.url_for')
self.url_for = self._url_for.start()
self.url_for.side_effect = lambda endpoint, slug: '/%s/' % slug
def tearDown(self):
del self.url_for
del self.current_app
self._url_for.stop()
self._current_app.stop()
DirectiveTestCase.tearDown(self)
def test_run_without_argument(self):
content = parser('''\
asd
---
.. blohg-subpages::
''', 3, ':repo:cont/index.rs')
self.assertIn('"/foo/"', content['fragment'])
self.assertIn('>Foo :)<', content['fragment'])
self.assertIn('"/bar/"', content['fragment'])
self.assertIn('>Bar!<', content['fragment'])
self.assertNotIn('"/foo/bar/"', content['fragment'])
self.assertNotIn('>Foo Bar :P<', content['fragment'])
self.assertNotIn('"/foo/bar/baz/"', content['fragment'])
self.assertNotIn('>Foo Bar Baz XD<', content['fragment'])
def test_run_without_argument_from_subpage(self):
content = parser('''\
asd
---
.. blohg-subpages::
''', 3, ':repo:cont/foo.rs')
self.assertNotIn('"/foo/"', content['fragment'])
self.assertNotIn('>Foo :)<', content['fragment'])
self.assertNotIn('"/bar/"', content['fragment'])
self.assertNotIn('>Bar!<', content['fragment'])
self.assertIn('"/foo/bar/"', content['fragment'])
self.assertIn('>Foo Bar :P<', content['fragment'])
self.assertNotIn('"/foo/bar/baz/"', content['fragment'])
self.assertNotIn('>Foo Bar Baz XD<', content['fragment'])
def test_run_with_argument(self):
content = parser('''\
asd
---
.. blohg-subpages:: foo
''', 3)
self.assertNotIn('"/foo/"', content['fragment'])
self.assertNotIn('>Foo :)<', content['fragment'])
self.assertNotIn('"/bar/"', content['fragment'])
self.assertNotIn('>Bar!<', content['fragment'])
self.assertIn('"/foo/bar/"', content['fragment'])
self.assertIn('>Foo Bar :P<', content['fragment'])
self.assertNotIn('"/foo/bar/baz/"', content['fragment'])
self.assertNotIn('>Foo Bar Baz XD<', content['fragment'])
def test_run_with_argument_from_subpage(self):
content = parser('''\
asd
---
.. blohg-subpages:: foo
''', 3, ':repo:cont/foo/index.rs')
self.assertNotIn('"/foo/"', content['fragment'])
self.assertNotIn('>Foo :)<', content['fragment'])
self.assertNotIn('"/bar/"', content['fragment'])
self.assertNotIn('>Bar!<', content['fragment'])
self.assertIn('"/foo/bar/"', content['fragment'])
self.assertIn('>Foo Bar :P<', content['fragment'])
self.assertNotIn('"/foo/bar/baz/"', content['fragment'])
self.assertNotIn('>Foo Bar Baz XD<', content['fragment'])
def test_run_with_argument_for_subsubpage(self):
content = parser('''\
asd
---
.. blohg-subpages:: foo/bar
''', 3)
self.assertNotIn('"/foo/"', content['fragment'])
self.assertNotIn('>Foo :)<', content['fragment'])
self.assertNotIn('"/bar/"', content['fragment'])
self.assertNotIn('>Bar!<', content['fragment'])
self.assertNotIn('"/foo/bar/"', content['fragment'])
self.assertNotIn('>Foo Bar :P<', content['fragment'])
self.assertIn('"/foo/bar/baz/"', content['fragment'])
self.assertIn('>Foo Bar Baz XD<', content['fragment'])
self.assertNotIn('>Foo Bar Bad XD<', content['fragment'])
self.assertIn('"/foo/bar/bad/"', content['fragment'])
self.assertIn('###asd###', content['fragment'])
class IncludeHgTestCase(DirectiveTestCase):
directive = IncludeHg
def setUp(self):
DirectiveTestCase.setUp(self)
self._current_app = mock.patch('blohg.file_like.current_app')
self.current_app = self._current_app.start()
self.current_app.config = {'ATTACHMENT_DIR': 'content/att'}
fctx1 = mock.Mock(path='content/inc.rst', content='''\
Included paragraph
------------------
lol
XD
''')
self.current_app.blohg.changectx.get_filectx.return_value = fctx1
def tearDown(self):
del self.current_app
self._current_app.stop()
DirectiveTestCase.tearDown(self)
def test_simple_include(self):
content = parser('''\
asd
===
hahah
.. blohg-includehg:: content/foo.rst
''', 3)
self.assertIn('<h3>Included paragraph</h3>', content['fragment'])
self.assertIn('<p>lol</p>', content['fragment'])
self.assertIn('<p>XD</p>', content['fragment'])
def test_include_with_start_and_end(self):
content = parser('''\
asd
===
hahah
.. blohg-includehg:: content/foo.rst
:start-line: 3
:end-line: 4
''', 3)
self.assertNotIn('<h3>Included paragraph</h3>', content['fragment'])
self.assertIn('<p>lol</p>', content['fragment'])
self.assertNotIn('<p>XD</p>', content['fragment'])
def test_include_literal(self):
content = parser('''\
asd
===
hahah
.. blohg-includehg:: content/foo.rst
:literal:
''', 3)
self.assertIn('\nIncluded paragraph\n', content['fragment'])
self.assertIn('\nlol\n', content['fragment'])
self.assertIn('\nXD\n', content['fragment'])
| gpl-2.0 | 5,583,305,482,015,392,000 | 30.204691 | 82 | 0.600683 | false |
thingslogic/thingslogic-scd | crate/tests/tests.py | 1 | 2116 | # -*- coding: utf-8 -*-
# vim: set fileencodings=utf-8
__docformat__ = "reStructuredText"
import os
import sys
import unittest
from requests.exceptions import ConnectionError
from docker import Client
from docker.utils import kwargs_from_env
from itests import SimpleRunTest, JavaPropertiesTest, \
EnvironmentVariablesTest, SigarStatsTest, TarballRemovedTest
DIR = os.path.dirname(__file__)
class RuntimeError(Exception):
pass
class DockerLayer(object):
def __init__(self, name, tag):
self.__name__ = name
self.__bases__ = tuple([])
self.tag = tag
self.client = Client(base_url='unix://var/run/docker.sock')
try:
self.client.ping()
except ConnectionError as e:
# http://docker-py.readthedocs.org/en/latest/boot2docker/
kwargs = kwargs_from_env()
kwargs['tls'].assert_hostname = False
self.client = Client(**kwargs)
def setUp(self):
if self.client.ping() == u'OK':
self.start()
else:
raise RuntimeError('Docker is not available.\n'
'Make sure you have Docker installed before running tests.\n'
'Visit https://docker.com for installation instructions.')
def start(self):
sys.stdout.write('\nBuilding container {}\n'.format(self.tag))
for line in self.client.build(
path=os.path.abspath(os.path.join(DIR, '..')),
tag=self.tag, rm=True, forcerm=True):
sys.stdout.write('.')
sys.stdout.write('\n')
def tearDown(self):
self.stop()
def stop(self):
self.client.close()
def test_suite():
docker_layer = DockerLayer('docker', 'crate/crate:test')
suite = unittest.TestSuite()
suite.addTest(SimpleRunTest(docker_layer))
suite.addTest(JavaPropertiesTest(docker_layer))
suite.addTest(EnvironmentVariablesTest(docker_layer))
suite.addTest(SigarStatsTest(docker_layer))
suite.addTest(TarballRemovedTest(docker_layer))
suite.layer = docker_layer
return suite
| apache-2.0 | 851,551,939,712,458,400 | 28.802817 | 92 | 0.623346 | false |
miing/mci_migo | acceptance/tests/emails/email_token_link.py | 1 | 1115 | # 6) Ensure tokens work from the email, and also clicking the
# link in the email works.
from sst.actions import (
assert_element,
assert_title_contains,
click_button,
get_element,
go_to,
wait_for,
write_textfield,
)
from u1testutils import mail
from u1testutils.sso import mail as sso_mail
from u1testutils.sst import config
from acceptance import helpers, urls
config.set_base_url_from_env()
NAME = 'Some Name'
# Register the primary account.
primary_email = helpers.register_account(displayname=NAME)
# Register a secondary email, and grab the link from the email sent to
# the secondary address.
secondary_email = mail.make_unique_test_email_address()
go_to(urls.EMAILS)
wait_for(assert_title_contains, "'s email addresses")
write_textfield('id_newemail', secondary_email)
click_button(get_element(name='continue'))
link = sso_mail.get_verification_link_for_address(secondary_email)
# Follow the link from the email to ensure it verifies the secondary
# address.
go_to(link)
click_button(get_element(name='continue'))
wait_for(assert_element, **{'data-qa-id': 'edit_account'})
| agpl-3.0 | 1,245,991,233,961,042,700 | 28.342105 | 70 | 0.749776 | false |
repotvsupertuga/repo | plugin.video.zen/resources/lib/sources/watchfilm.py | 1 | 4031 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
import requests
from resources.lib.modules import client
from resources.lib.modules import directstream
from BeautifulSoup import BeautifulSoup
from resources.lib.modules import jsunpack
from schism_net import OPEN_URL
from schism_commons import quality_tag, google_tag, parseDOM, replaceHTMLCodes ,cleantitle_get, cleantitle_query
class source:
def __init__(self):
self.base_link = 'http://watchfilm.to'
self.movie_link = '/movies/%s/'
self.ep_link = '/episode/%s/'
def movie(self, imdb, title, year):
self.zen_url = []
try:
# print("WATCHCARTOON")
title = cleantitle_query(title)
title = title.replace(' ','-')
query = self.movie_link % (title)
url = urlparse.urljoin(self.base_link, query)
return url
except:
return
# http://blackcinema.org/episodes/ash-vs-evil-dead-1x2/
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
self.zen_url = []
try:
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
data['season'], data['episode'] = season, episode
episodeid = "%01dx%01d" % (int(data['season']) , int(data['episode']))
title = cleantitle_query(title)
title = title.replace(' ','-')
query = title + "-" + episodeid
query= self.ep_link % query
url = urlparse.urljoin(self.base_link, query)
print("Watchfilm TV SHOW", url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if url == None: return
try:
link = OPEN_URL(url, timeout='10')
print("Watchfilm link", link.content)
html = link.content
r = re.compile('<a href="(.+?)" target="streamplayer">').findall(html)
for result in r:
print("Watchfilm SOURCES", result)
result = result.encode('utf-8')
if result.startswith("//"): result = "http:" + result
if "player.watchfilm.to" in result:
try:
s = OPEN_URL(result, timeout='10')
s = s.content
match = re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(s)
for href, quality in match:
quality = google_tag(href)
print("WONLINE SCRIPTS", href,quality)
sources.append({'source': 'gvideo', 'quality':quality, 'provider': 'Watchfilm', 'url': href, 'direct': True, 'debridonly': False})
except:
pass
try:
s = OPEN_URL(result, timeout='10')
s = s.content
match = re.compile('var ff =\s*"(.+?)";').findall(s)
for href in match:
quality = "SD"
try:host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
except: host = 'none'
url = replaceHTMLCodes(href)
url = url.encode('utf-8')
if host in hostDict: sources.append({'source': host, 'quality':quality, 'provider': 'Watchfilm', 'url': href, 'direct': False, 'debridonly': False})
except:
pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 | -2,366,760,702,463,126,000 | 28.423358 | 156 | 0.639295 | false |
kamilkloch/pdr-dfki | python/database.py | 1 | 3427 | from __future__ import (absolute_import, division, print_function)
from couchdb import Server
from couchdb.design import ViewDefinition
class Database(object):
""" TODO: docstring
"""
def __init__(self, server_url=u'http://127.0.0.1:5984/', db_name='ble-new'):
# 'http://dfki-1239.dfki.uni-kl.de:5984/'
self.server_url, self.db_name = server_url, db_name
self.couch = Server(self.server_url)
self.db = self.couch[self.db_name]
def __getitem__(self, doc_id):
""" returns the database document
"""
return self.db[doc_id]
def _sync_permanent_views(self):
view = ViewDefinition('elvis',
'newest_location_documents_from_elvis', '''
function(doc) {
if (doc.source && doc.source == "elvis" && doc.location)
emit(doc.dest, doc.location.positions[0].timestamp);
}''',
'''
function(keys, values, rereduce) {
if (rereduce) {
var result = {
id: 'fffaef464c42c6ffe0285be3d7da3684',
timestamp: '2113-08-04 19:09:24:089'
};
return (result);
} else {
var result = {
id: keys[0][1],
timestamp: values[0]
};
for (var i = 1, e = keys.length; i < e; ++i) {
if (values[i] > result.timestamp) {
result.timestamp = values[i];
result.id = keys[i][1];
}
}
return (result);
}
}'''
)
view.sync(self.db)
view = ViewDefinition('elvis', 'all_location_documents_from_elvis', '''
function(doc) {
if (doc.source && doc.source == "elvis" && doc.location)
emit([doc.location.positions[doc.location.positions.length-1].timestamp, doc.dest]);
}'''
)
view.sync(self.db)
view = ViewDefinition('elvis', 'all_ble_documents', '''
function(doc) {
if (doc.ble)
emit([doc.ble[doc.ble.length-1].timestamp, doc.source]);
}'''
)
view.sync(self.db)
view = ViewDefinition('elvis', "all_location_documents_from_reckonme", '''
function(doc) {
if (doc.dest && doc.source && doc.timestamp && doc.location && doc.dest == 'elvis') {
emit([doc.timestamp, doc.source])
}
}'''
)
view.sync(self.db)
def view_result(self, view_str):
""" returns a representation of a parameterized view
(either permanent or temporary)
"""
return self.db.view("_design/elvis/_view/" + view_str)
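# Minimal usage sketch (assumes a reachable CouchDB instance holding the
# 'ble-new' database; the document id below is hypothetical):
#     db = Database()
#     doc = db['some_doc_id']
#     rows = db.view_result('all_ble_documents')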
def test():
db = Database()
db._sync_permanent_views()
print(len(list(db.view_result(u'all_location_documents_from_reckonme'))))
for row in db.view_result('all_location_documents_from_reckonme'):
print(row.id)
print(db[row.id])
break
if __name__ == '__main__':
test() | mit | 6,161,475,019,724,955,000 | 33.626263 | 104 | 0.457251 | false |
madoodia/codeLab | python/modules_platform.py | 1 | 1829 | # ===============================================
# MODULE STUDY: platform
import platform
################################ Cross Platform ################################
platform.architecture() # Returns a tuple (bits, linkage)
platform.machine() # Returns the machine type, e.g. 'i386'
platform.node() # Returns the computer’s network name
platform.platform() # Returns a single string identifying the underlying platform with as much useful information as possible.
platform.processor() # Returns the (real) processor name, e.g. 'amdk6'.
platform.python_build() # Returns a tuple (buildno, builddate) stating the Python build number and date as strings.
platform.python_compiler() # Returns a string identifying the compiler used for compiling Python.
platform.python_branch() # Returns a string identifying the Python implementation SCM branch.
platform.python_implementation() # Returns a string identifying the Python implementation
platform.python_revision() # Returns a string identifying the Python implementation SCM revision.
platform.python_version() # Returns the Python version as string 'major.minor.patchlevel'
platform.python_version_tuple() # Returns the Python version as tuple (major, minor, patchlevel) of strings.
platform.release() # Returns the system’s release, e.g. '2.2.0' or 'NT'
platform.system() # Returns the system/OS name, e.g. 'Linux', 'Windows', or 'Java'
platform.version() # Returns the system’s release version
platform.uname() # Fairly portable uname interface.
# Returns a tuple of strings (system, node, release, version, machine, processor) identifying the underlying platform.
platform.win32_ver() # Availability: windows
| mit | 965,381,198,625,149,200 | 39.511111 | 146 | 0.664838 | false |
BrewCenter/BrewCenterAPI | brewcenter_api/brew_data/data_miner/brew_target/fermentables.py | 1 | 4778 | """
Extracts Fermentables from the database, transforms them, and builds a new db.
"""
from brew_data.data_miner.brew_target.utils import clean, convert_country
class Fermentable:
def __init__(self, data):
self.name = data[0]
self.type = data[1]
self.potential = data[2]
self.lovibond = data[3]
self.origin = data[4]
self.supplier = data[5]
self.notes = clean(data[6])
self.coarse_fine_diff = data[7]
self.moisture = data[8]
self.diastatic_power = data[9]
self.protein = data[10]
self.max_in_batch = data[11]
self.is_mashed = data[12]
self.transform()
def transform(self):
"""transforms the data as neccessary to fit our specs"""
self.name = '"' + self.name + '"'
# convert boolean to integer for sqlite
self.is_mashed = (1 if self.is_mashed == 'true' else 0)
# Sugar has a PPG of 46. Multiply the potential percent yield by 46 to
# get PPG of a grain
self.ppg = 46 * (self.potential / 100)
self.country = convert_country(self.origin)
# parse type
if self.type == "Extract":
self.type = "Liquid Malt Extract"
elif self.type == "Dry Extract":
self.type = "Dry Malt Extract"
if len(self.type) == 0:
self.type = "NULL"
else:
self.type = '"' + self.type + '"'
# convert "None" notes to empty
if self.notes is None:
self.notes = '""'
else:
self.notes = '"' + self.notes + '"'
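    # Illustrative check of the yield conversion above (numbers assumed):
    # a grain listed at 80% potential yield maps to ppg = 46 * 0.80 = 36.8
    # gravity points per pound per gallon.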
def get_keys():
return ("name, type_id, country_id, notes, ppg, lovibond, moisture, "
"diastatic_power, protein, max_in_batch, is_mashed")
def __str__(self):
format_str = '{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10}'
return format_str.format(
self.name,
self.type_id,
self.country_id,
self.notes,
self.ppg,
self.lovibond,
self.moisture,
self.diastatic_power,
self.protein,
self.max_in_batch,
self.is_mashed,
)
def get_fermentables(s, d):
"""
Gets fermentables from the source (s) and puts them in the destination (d).
"""
d.execute('DROP TABLE IF EXISTS fermentabletype;')
d.execute('DROP TABLE IF EXISTS fermentable;')
d.execute('CREATE TABLE fermentabletype(name TEXT, abbreviation TEXT);')
d.execute('CREATE TABLE fermentable(' \
'name TEXT,' \
'type_id int,' \
'country_id int,' \
'ppg FLOAT,' \
'lovibond FLOAT,' \
'moisture FLOAT,' \
'diastatic_power FLOAT,' \
'protein FLOAT,' \
'max_in_batch FLOAT,' \
'is_mashed INT,' \
'notes TEXT' \
');'
)
s.execute('SELECT "name", "ftype", "yield", "color", "origin", "supplier", "notes", "coarse_fine_diff", "moisture", "diastatic_power", "protein", "max_in_batch", "is_mashed" FROM fermentable WHERE `deleted`=0;')
cur = s.fetchone()
n = 0
while cur:
f = Fermentable(cur)
# check if the country code exists already and add it if it does not
f.country_id = 'NULL'
        if f.country != 'NULL':
d.execute('SELECT `rowid` FROM countrycode WHERE code={0};'.format(f.country))
country_code_id = d.fetchone()
if country_code_id is None:
d.execute('INSERT INTO countrycode(code) VALUES ({0});'.format(f.country))
d.execute('SELECT `rowid` FROM countrycode WHERE code={0};'.format(f.country))
country_code_id = d.fetchone()
f.country_id = country_code_id[0] if country_code_id else 'NULL'
# check if the type already exists and add it if it does not
f.type_id = 'NULL'
        if f.type != 'NULL':
d.execute('SELECT `rowid` FROM fermentabletype WHERE name={0};'.format(f.type))
type_id = d.fetchone()
if type_id is None:
d.execute('INSERT INTO fermentabletype(name) VALUES({0});'.format(f.type))
d.execute('SELECT `rowid` FROM fermentabletype WHERE name={0};'.format(f.type))
type_id = d.fetchone()
f.type_id = type_id[0] if type_id else 'NULL'
query = 'INSERT INTO fermentable({0}) VALUES({1});'.format(Fermentable.get_keys(), str(f))
d.execute(query)
n += 1
cur = s.fetchone()
print("Found {0} fermentables.".format(n))
| gpl-3.0 | 4,258,352,382,892,321,300 | 36.328125 | 216 | 0.525952 | false |
hortonworks/hortonworks-sandbox | tutorials/tutorials_app/urls.py | 1 | 1395 | # Licensed to Hortonworks, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Hortonworks, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import settings
from django.conf.urls.defaults import *
urlpatterns = patterns('tutorials_app.views',
(r'^$', 'index'),
(r'^content/(.*)$', 'content'),
(r'^sync/$', 'sync_location'),
(r'^netinfo/$', 'network_info'),
(r'^file/(?P<path>.*)$', 'get_file'),
)
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_ROOT, 'show_indexes': True}),
)
| apache-2.0 | -554,571,369,595,481,200 | 44 | 88 | 0.61147 | false |
brainix/pottery | tests/test_nextid.py | 1 | 3438 | # --------------------------------------------------------------------------- #
# test_nextid.py #
# #
# Copyright © 2015-2021, Rajiv Bakulesh Shah, original author. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at: #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# --------------------------------------------------------------------------- #
'Distributed Redis-powered monotonically increasing ID generator tests.'
import unittest.mock
from redis.exceptions import TimeoutError
from pottery import NextId
from pottery import QuorumIsImpossible
from pottery import QuorumNotAchieved
from tests.base import TestCase # type: ignore
class NextIdTests(TestCase):
'Distributed Redis-powered monotonically increasing ID generator tests.'
def setUp(self):
super().setUp()
self.redis.delete('nextid:current')
self.ids = NextId(masters={self.redis})
for master in self.ids.masters:
master.set(self.ids.key, 0)
def test_nextid(self):
for id_ in range(1, 10):
with self.subTest(id_=id_):
assert next(self.ids) == id_
def test_iter(self):
assert iter(self.ids) is self.ids
def test_next_quorumnotachieved(self):
with self.assertRaises(QuorumNotAchieved), \
unittest.mock.patch.object(
next(iter(self.ids.masters)),
'get',
) as get:
get.side_effect = TimeoutError
next(self.ids)
with self.assertRaises(QuorumNotAchieved), \
unittest.mock.patch.object(
self.ids,
'_set_id_script',
) as _set_id_script:
_set_id_script.side_effect = TimeoutError
next(self.ids)
def test_next_quorumisimpossible(self):
self.ids = NextId(masters={self.redis}, raise_on_redis_errors=True)
with self.assertRaises(QuorumIsImpossible), \
unittest.mock.patch.object(
next(iter(self.ids.masters)),
'get',
) as get:
get.side_effect = TimeoutError
next(self.ids)
with self.assertRaises(QuorumIsImpossible), \
unittest.mock.patch.object(
self.ids,
'_set_id_script',
) as _set_id_script:
_set_id_script.side_effect = TimeoutError
next(self.ids)
def test_repr(self):
assert repr(self.ids) == '<NextId key=nextid:current value=0>'
| apache-2.0 | -3,632,889,957,568,443,000 | 39.435294 | 79 | 0.507128 | false |
Zen-CODE/kivybits | Examples/ColorPickerPopup/main.py | 1 | 2985 | '''
Demo Popup with a ColorPicker
'''
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.colorpicker import ColorPicker
from kivy.uix.popup import Popup
from kivy.uix.label import Label
class ColorPopup(App):
'''
    This class represents your application. There should be only one per app.
'''
def build(self):
'''
This method is called automatically and should return your "root"
widget.
'''
self.label = Label(text="Colour not selected.")
layout = BoxLayout(
orientation="vertical",
padding=[50, 50, 50, 50])
layout.add_widget(self.label)
layout.add_widget(
Button(
text="Select colour",
on_release=self.select_color))
layout.add_widget(
Button(
text="OK and Cancel",
on_release=lambda inst: self.select_color(inst, False)))
return layout
def select_color(self, instance, no_buts=True):
'''
The button click has fired the event, so show the popup.
no_buts is boolean and specifies whether to include buttons
in the popup or not.
'''
popup = Popup(
title="Select your colour",
size_hint=(0.75, 0.75))
# NOTE: the below properties can also be passed in to the Popup
# constructor but we do them separately for clarity.
if no_buts:
colorPicker = ColorPicker()
popup.bind(
on_dismiss=lambda popup: \
self.popup_dismissed(popup, colorPicker.hex_color))
popup.content = colorPicker
else:
# We prevent the default dismiss behaviour and roll our own in
# the content.
popup.auto_dismiss = False
popup.content = self.get_ok_cancel_content(popup)
popup.open()
def popup_dismissed(self, popup, color):
''' The popup has been dismissed'''
self.label.text = "Colour in hex = " + color
def get_ok_cancel_content(self, popup):
'''Return content with OK and cancel buttons for validating'''
colorPicker = ColorPicker()
buttonLayout = BoxLayout(orientation="horizontal",
padding="5sp",
size_hint_y=0.2)
okButton = Button(
text="Okay",
            # Note: chaining with 'and' would skip the second call whenever
            # dismiss() returns a falsy value, so evaluate both in a tuple.
            on_release=lambda but: (
                popup.dismiss(),
                self.popup_dismissed(popup, colorPicker.hex_color)))
cancelButton = Button(
text="Cancel",
on_release=lambda but: popup.dismiss())
buttonLayout.add_widget(okButton)
buttonLayout.add_widget(cancelButton)
mainLayout = BoxLayout(orientation="vertical")
mainLayout.add_widget(colorPicker)
mainLayout.add_widget(buttonLayout)
return mainLayout
if __name__ == '__main__':
ColorPopup().run()
| mit | -1,846,627,650,480,030,200 | 32.166667 | 75 | 0.586265 | false |
pkrebs/WIDPS | fw_modules/module_daemon.py | 1 | 5578 | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
#
# module_daemon.py - WIDS/WIPS framework frame daemon base class module
# Copyright (C) 2009 Peter Krebs, Herbert Haas
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the
# Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see http://www.gnu.org/licenses/gpl-2.0.html
"""Daemon module template
Provides DaemonClass, which turns another python class into a daemon process.
This module was thankfully obtained from Sander Marechal at:
http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python
"""
# Imports
#
# Custom modules
# Standard modules
import atexit
import os
from signal import SIGTERM, SIGKILL
import sys
import time
class DaemonClass():
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
self.pid = None
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
#os.chdir("/")
os.chdir(os.getcwd()) # set current working directory instead of root
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
#atexit.register(self.delpid)
self.pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % self.pid)
def delpid(self):
"""
Removes the pidfile.
"""
try:
os.remove(self.pidfile)
except OSError:
print "No pidfile to remove"
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
killcounter = 0
kill_threshold = 20
try:
while 1:
os.kill(pid, SIGTERM)
killcounter = killcounter + 1
if killcounter > kill_threshold:
message = "Process not reacting, sending SIGKILL\n"
sys.stderr.write(message)
os.kill(pid, SIGKILL)
killcounter = 0
time.sleep(1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
        You should override this method when you subclass DaemonClass. It will be called after the process has been
daemonized by start() or restart().
"""
pass
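# Minimal subclass sketch (illustrative; the pidfile path is an assumption):
#     class MyDaemon(DaemonClass):
#         def run(self):
#             while True:
#                 time.sleep(1)
#     MyDaemon('/tmp/mydaemon.pid').start()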
if __name__ == "__main__":
print "Warning: This module is not intended to be executed directly. Only do this for test purposes." | gpl-2.0 | 7,683,708,546,887,967,000 | 28.209424 | 110 | 0.542668 | false |
fyabc/MiniGames | HearthStone2/MyHearthStone/ui/ui_pyqt/ui_dialog_create_deck.py | 1 | 11521 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dialog_create_deck.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DialogCreateDeck(object):
def setupUi(self, DialogCreateDeck):
DialogCreateDeck.setObjectName("DialogCreateDeck")
DialogCreateDeck.resize(351, 418)
self.verticalLayout = QtWidgets.QVBoxLayout(DialogCreateDeck)
self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout.setObjectName("verticalLayout")
self.group_class = QtWidgets.QGroupBox(DialogCreateDeck)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.group_class.setFont(font)
self.group_class.setObjectName("group_class")
self.gridLayout_2 = QtWidgets.QGridLayout(self.group_class)
self.gridLayout_2.setObjectName("gridLayout_2")
self.radioButton_Druid = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_Druid.setFont(font)
self.radioButton_Druid.setChecked(True)
self.radioButton_Druid.setObjectName("radioButton_Druid")
self.gridLayout_2.addWidget(self.radioButton_Druid, 0, 0, 1, 1)
self.radioButton_Hunter = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_Hunter.setFont(font)
self.radioButton_Hunter.setObjectName("radioButton_Hunter")
self.gridLayout_2.addWidget(self.radioButton_Hunter, 0, 1, 1, 1)
self.radioButton_Mage = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_Mage.setFont(font)
self.radioButton_Mage.setObjectName("radioButton_Mage")
self.gridLayout_2.addWidget(self.radioButton_Mage, 0, 2, 1, 1)
self.radioButton_Priest = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_Priest.setFont(font)
self.radioButton_Priest.setObjectName("radioButton_Priest")
self.gridLayout_2.addWidget(self.radioButton_Priest, 1, 0, 1, 1)
self.radioButton_Shaman = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_Shaman.setFont(font)
self.radioButton_Shaman.setObjectName("radioButton_Shaman")
self.gridLayout_2.addWidget(self.radioButton_Shaman, 1, 1, 1, 1)
self.radioButton_Rogue = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_Rogue.setFont(font)
self.radioButton_Rogue.setObjectName("radioButton_Rogue")
self.gridLayout_2.addWidget(self.radioButton_Rogue, 1, 2, 1, 1)
self.radioButton_Paladin = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_Paladin.setFont(font)
self.radioButton_Paladin.setObjectName("radioButton_Paladin")
self.gridLayout_2.addWidget(self.radioButton_Paladin, 2, 0, 1, 1)
self.radioButton_Warlock = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_Warlock.setFont(font)
self.radioButton_Warlock.setObjectName("radioButton_Warlock")
self.gridLayout_2.addWidget(self.radioButton_Warlock, 2, 1, 1, 1)
self.radioButton_Warrior = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_Warrior.setFont(font)
self.radioButton_Warrior.setObjectName("radioButton_Warrior")
self.gridLayout_2.addWidget(self.radioButton_Warrior, 2, 2, 1, 1)
self.radioButton_Monk = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_Monk.setFont(font)
self.radioButton_Monk.setObjectName("radioButton_Monk")
self.gridLayout_2.addWidget(self.radioButton_Monk, 3, 0, 1, 1)
self.radioButton_DeathKnight = QtWidgets.QRadioButton(self.group_class)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_DeathKnight.setFont(font)
self.radioButton_DeathKnight.setObjectName("radioButton_DeathKnight")
self.gridLayout_2.addWidget(self.radioButton_DeathKnight, 3, 1, 1, 1)
self.radioButton_Druid.raise_()
self.radioButton_Hunter.raise_()
self.radioButton_Mage.raise_()
self.radioButton_Priest.raise_()
self.radioButton_Shaman.raise_()
self.radioButton_Rogue.raise_()
self.radioButton_Paladin.raise_()
self.radioButton_Warlock.raise_()
self.radioButton_Warrior.raise_()
self.radioButton_Monk.raise_()
self.radioButton_DeathKnight.raise_()
self.verticalLayout.addWidget(self.group_class)
self.line = QtWidgets.QFrame(DialogCreateDeck)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.group_mode = QtWidgets.QGroupBox(DialogCreateDeck)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.group_mode.setFont(font)
self.group_mode.setObjectName("group_mode")
self.gridLayout = QtWidgets.QGridLayout(self.group_mode)
self.gridLayout.setObjectName("gridLayout")
self.radioButton_standard = QtWidgets.QRadioButton(self.group_mode)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_standard.setFont(font)
self.radioButton_standard.setChecked(True)
self.radioButton_standard.setObjectName("radioButton_standard")
self.gridLayout.addWidget(self.radioButton_standard, 0, 0, 1, 1)
self.radioButton_wild = QtWidgets.QRadioButton(self.group_mode)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioButton_wild.setFont(font)
self.radioButton_wild.setObjectName("radioButton_wild")
self.gridLayout.addWidget(self.radioButton_wild, 0, 1, 1, 1)
self.verticalLayout.addWidget(self.group_mode)
self.line_2 = QtWidgets.QFrame(DialogCreateDeck)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout.addWidget(self.line_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_deck_name = QtWidgets.QLabel(DialogCreateDeck)
font = QtGui.QFont()
font.setFamily("Microsoft YaHei UI")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_deck_name.setFont(font)
self.label_deck_name.setObjectName("label_deck_name")
self.horizontalLayout.addWidget(self.label_deck_name, 0, QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.edit_deck_name = QtWidgets.QLineEdit(DialogCreateDeck)
self.edit_deck_name.setObjectName("edit_deck_name")
self.horizontalLayout.addWidget(self.edit_deck_name, 0, QtCore.Qt.AlignTop)
self.verticalLayout.addLayout(self.horizontalLayout)
self.buttonBox = QtWidgets.QDialogButtonBox(DialogCreateDeck)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonBox.sizePolicy().hasHeightForWidth())
self.buttonBox.setSizePolicy(sizePolicy)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox, 0, QtCore.Qt.AlignBottom)
self.retranslateUi(DialogCreateDeck)
self.buttonBox.accepted.connect(DialogCreateDeck.accept)
self.buttonBox.rejected.connect(DialogCreateDeck.reject)
QtCore.QMetaObject.connectSlotsByName(DialogCreateDeck)
def retranslateUi(self, DialogCreateDeck):
_translate = QtCore.QCoreApplication.translate
DialogCreateDeck.setWindowTitle(_translate("DialogCreateDeck", "新建套牌"))
self.group_class.setTitle(_translate("DialogCreateDeck", "选择职业"))
self.radioButton_Druid.setText(_translate("DialogCreateDeck", "德鲁伊"))
self.radioButton_Hunter.setText(_translate("DialogCreateDeck", "猎人"))
self.radioButton_Mage.setText(_translate("DialogCreateDeck", "法师"))
self.radioButton_Priest.setText(_translate("DialogCreateDeck", "牧师"))
self.radioButton_Shaman.setText(_translate("DialogCreateDeck", "萨满祭司"))
self.radioButton_Rogue.setText(_translate("DialogCreateDeck", "潜行者"))
self.radioButton_Paladin.setText(_translate("DialogCreateDeck", "圣骑士"))
self.radioButton_Warlock.setText(_translate("DialogCreateDeck", "术士"))
self.radioButton_Warrior.setText(_translate("DialogCreateDeck", "战士"))
self.radioButton_Monk.setText(_translate("DialogCreateDeck", "武僧"))
self.radioButton_DeathKnight.setText(_translate("DialogCreateDeck", "死亡骑士"))
self.group_mode.setTitle(_translate("DialogCreateDeck", "选择模式"))
self.radioButton_standard.setText(_translate("DialogCreateDeck", "标准模式"))
self.radioButton_wild.setText(_translate("DialogCreateDeck", "狂野模式"))
self.label_deck_name.setText(_translate("DialogCreateDeck", "套牌名称"))
| mit | 2,872,133,755,172,138,000 | 48.415584 | 106 | 0.687516 | false |
natanocr/instadown | instadown.py | 1 | 1429 | import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from flask import Flask, request, render_template
import requests
from bs4 import BeautifulSoup
import os
app = Flask(__name__)
header = {"User-Agent":"instadown", "e-mail":"[email protected]"}
def get_data(url):
r = requests.get(url, headers=header)
    # initialize all fields so a missing meta tag cannot raise NameError
    _content_title = _url_image = _url_video = ''
if r.status_code == 200:
sopa = BeautifulSoup(r.content)
for meta in sopa.findAll("meta"):
            if meta.get("property") == "og:title" and meta.get("content") is not None:
                _content_title = meta.get("content")
            if meta.get("property") == "og:video" and meta.get("content") is not None:
                _url_video = meta.get("content")
            elif meta.get("property") == "og:image" and meta.get("content") is not None:
                _url_image = meta.get("content")
if _url_video == '':
return dict(title=_content_title, image=_url_image)
else:
return dict(title=_content_title, video=_url_video)
return None
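# Illustrative call (the post URL is hypothetical):
#     data = get_data('https://instagram.com/p/SOME_ID/')
#     # -> {'title': ..., 'video': ...} or {'title': ..., 'image': ...}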
@app.route('/', methods=['GET', 'POST'])
def post():
if request.method == 'POST':
_url = request.form['url']
data = get_data(_url)
print data
return render_template('home.html', content_dow=data)
return render_template('home.html')
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
| mit | 5,269,061,608,380,642,000 | 29.404255 | 84 | 0.582225 | false |
conan-io/conan | conans/test/functional/toolchains/test_txt_cmdline.py | 1 | 2050 | import platform
import textwrap
import unittest
import pytest
from conans.test.utils.tools import TestClient
@pytest.mark.toolchain
class TestTxtCommandLine(unittest.TestCase):
def test_declarative(self):
conanfile = textwrap.dedent("""
[generators]
CMakeToolchain
MesonToolchain
""")
client = TestClient()
client.save({"conanfile.txt": conanfile})
client.run("install .")
self._check(client)
def _check(self, client):
self.assertIn("conanfile.txt: Generator 'CMakeToolchain' calling 'generate()'", client.out)
self.assertIn("conanfile.txt: Generator 'MesonToolchain' calling 'generate()'", client.out)
toolchain = client.load("conan_toolchain.cmake")
self.assertIn("Conan automatically generated toolchain file", toolchain)
toolchain = client.load("conan_meson_native.ini")
self.assertIn("[project options]", toolchain)
def test_command_line(self):
client = TestClient()
client.save({"conanfile.txt": ""})
client.run("install . -g CMakeToolchain -g MesonToolchain ")
self._check(client)
@pytest.mark.tool_visual_studio
@pytest.mark.skipif(platform.system() != "Windows", reason="Only for windows")
class TestTxtCommandLineMSBuild(unittest.TestCase):
def test_declarative(self):
conanfile = textwrap.dedent("""
[generators]
MSBuildToolchain
""")
client = TestClient()
client.save({"conanfile.txt": conanfile})
client.run("install .")
self._check(client)
def _check(self, client):
self.assertIn("conanfile.txt: Generator 'MSBuildToolchain' calling 'generate()'", client.out)
toolchain = client.load("conantoolchain.props")
self.assertIn("<?xml version", toolchain)
def test_command_line(self):
client = TestClient()
client.save({"conanfile.txt": ""})
client.run("install . -g MSBuildToolchain")
self._check(client)
| mit | -3,610,724,573,420,775,400 | 32.064516 | 101 | 0.64 | false |
danmoser/pyhdust | pyhdust/rotstars.py | 1 | 28843 | # -*- coding:utf-8 -*-
"""PyHdust *rotstars* module: Rotating stars tools.
:co-author: Rodrigo Vieira
:license: GNU GPL v3.0 https://github.com/danmoser/pyhdust/blob/master/LICENSE
"""
from __future__ import print_function
import os as _os
import re as _re
import numpy as _np
from pyhdust import hdtpath as _hdtpath
from scipy.interpolate import griddata as _griddata
import pyhdust.phc as _phc
import tarfile as _tarfile
import warnings as _warn
# try:
# import matplotlib.pyplot as _plt
# from scipy import interpolate as _interpolate
# except:
# print('# Warning! matplotlib and/or scipy module not installed!!!')
__author__ = "Daniel Moser"
__email__ = "[email protected]"
def readscr(scrfile):
''' Read source generated with *ref_estrela.txt*.
    OUTPUT: M, Req and Tp (M and Req in solar units; Tp in K).
'''
f0 = open(scrfile)
lines = f0.readlines()
f0.close()
n = int(_phc.fltTxtOccur('STAR =', lines, n=1))
M = _phc.fltTxtOccur('M =', lines, n=n)
Rp = _phc.fltTxtOccur('R_pole =', lines, n=n)
if n == 2:
ob = _phc.fltTxtOccur('R_eq/R_pole =', lines, n=1)
Tp = _phc.fltTxtOccur('Teff_pole =', lines, n=1)
else:
W = _phc.fltTxtOccur('W =', lines, n=1)
bet = _phc.fltTxtOccur('Beta_GD =', lines, n=1)
L = _phc.fltTxtOccur('L =', lines, n=n)
wfrac = _np.sqrt(27. / 8 * (1 + 0.5 * W**2)**-3 * W**2)
ob, Tp, A = rotStar(Tp=L, M=M, rp=Rp, beta=bet, wfrac=wfrac,
quiet=True, LnotTp=True)
# print M,Rp*ob,Tp
return M, Rp * ob, Tp
def vrot_scr(scrfile, old=True):
""" Returns the ``vrot`` value of a given source star.
OUTPUT: vrot in km/s. """
M, Req, Tp = readscr(scrfile)
# Be_M04.80_ob1.40_H0.30_Z0.014_bE_Ell
if old:
rule = '(?<=_ob)(.+)(?=_H)'
ob = float(_re.findall(rule, scrfile)[0])
else:
rule = '(?<=_W)(.+)(?=_t)'
W = float(_re.findall(rule, scrfile)[0])
ob = 1. + .5 * W**2
vrot = wrot(ob, is_ob=True) * \
_np.sqrt(_phc.G.cgs * _phc.Msun.cgs * M / Req / _phc.Rsun.cgs)
return vrot*1e-5
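# Illustrative usage (the file name is hypothetical, but follows the naming
# rule parsed by the regexes above):
#     M, Req, Tp = readscr('Be_M04.80_ob1.40_H0.30_Z0.014_bE_Ell.txt')
#     vrot = vrot_scr('Be_M04.80_ob1.40_H0.30_Z0.014_bE_Ell.txt')  # km/s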
def wrot(par, is_ob=False):
r""" Converts :math:`w_{\rm frac} = \Omega/\Omega_c` into
:math:`W = vrot/vorb`.
If ``is_ob == True``, it considers the param as the oblateness (instead of
:math:`w_{\rm frac}`). """
if is_ob:
wfrac = (1.5 ** 1.5) * _np.sqrt(2. * (par - 1.) / par ** 3)
else:
wfrac = par
if wfrac != 0.:
gam = 2. * _np.cos((_np.pi + _np.arccos(wfrac)) / 3.)
W = _np.sqrt(gam ** 3 / wfrac)
else:
W = 0.
return W
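# Quick sketch (for the rigid Roche case ob = 1 + W**2/2 holds exactly):
#     wrot(0.8)               # W for wfrac = Omega/Omega_c = 0.8
#     wrot(1.3, is_ob=True)   # ~0.7746, i.e. sqrt(2*(1.3 - 1))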
def wfrac_rot(W):
""" Returns wfrac (Omega/Omega_crit) value from a W value.
Equation 1.23 de Faes (2015).
"""
if W < 0 or W > 1:
_warn.warn('Invalid W value')
return _np.sqrt(27/8.*W**2/(1+.5*W**2)**3)
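# Round-trip sanity check (wfrac_rot is the algebraic inverse of wrot):
#     wfrac_rot(wrot(0.8))    # recovers ~0.8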
def beta(par, is_ob=False):
r""" Calculate the :math:`\beta` value from Espinosa-Lara for a given
rotation rate :math:`w_{\rm frac} = \Omega/\Omega_c`
    If ``is_ob == True``, it considers the param as the oblateness (instead of
:math:`w_{\rm frac}`). """
wfrac = par
if is_ob: # Ekstrom et al. 2008, Eq. 9
wfrac = (1.5 ** 1.5) * _np.sqrt(2. * (par - 1.) / par ** 3)
# avoid exceptions
if wfrac == 0:
return .25
elif wfrac == 1:
return 0.13535
elif wfrac < 0 or wfrac > 1:
_warn.warn('Invalid value of wfrac.')
return 0.
# Espinosa-Lara VLTI-School 2013 lecture, slide 18...
delt = 1.
omega1 = 0.
omega = wfrac
while delt >= 1e-5:
f = (3. / (2. + omega**2))**3 * omega**2 - wfrac**2
df = -108. * omega * (omega**2 - 1.) / (omega**2 + 2.)**4
omega1 = omega - f / df
delt = _np.abs(omega1 - omega) / omega
omega = omega1
nthe = 99
theta = _np.linspace(0, _np.pi / 2, nthe + 2)[1:-1]
grav = _np.zeros(nthe)
teff = _np.zeros(nthe)
corr = _np.zeros(nthe)
beta = 0.
for ithe in range(nthe):
delt = 1.
r1 = 0.
r = 1.
while delt >= 1e-5:
f = omega**2 * r**3 * \
_np.sin(theta[ithe])**2 - (2. + omega**2) * r + 2.
df = 3. * omega**2 * r**2 * \
_np.sin(theta[ithe])**2 - (2. + omega**2)
r1 = r - f / df
delt = _np.abs(r1 - r) / r
r = r1
delt = 1.
n1 = 0.
ftheta = 1. / 3. * omega**2 * r**3 * _np.cos(theta[ithe])**3 + \
_np.cos(theta[ithe]) + _np.log(_np.tan(theta[ithe] / 2.))
n = theta[ithe]
while delt >= 1e-5:
f = _np.cos(n) + _np.log(_np.tan(n / 2.)) - ftheta
df = -_np.sin(n) + 1. / _np.sin(n)
n1 = n - f / df
delt = abs(n1 - n) / n
n = n1
grav[ithe] = _np.sqrt(1. / r**4 + omega**4 * r**2 * _np.sin(
theta[ithe])**2 - 2. * omega**2 * _np.sin(theta[ithe])**2 / r)
corr[ithe] = _np.sqrt(_np.tan(n) / _np.tan(theta[ithe]))
teff[ithe] = corr[ithe] * grav[ithe]**0.25
u = ~_np.isnan(teff)
coef = _np.polyfit(_np.log(grav[u]), _np.log(teff[u]), 1)
beta = coef[0]
return beta
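# Illustrative calls (the gravity-darkening exponent decreases from 0.25 as
# rotation increases):
#     beta(0.8)               # wfrac = Omega/Omega_c = 0.8
#     beta(1.3, is_ob=True)   # same, parametrized by the oblateness Req/Rp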
def ellips_th(th, rf):
""" Ellipsoid radius
:param th: theta, in radians (0 = pole; pi/2 = equator).
    :param rf: radius fraction (Req/Rp >= 1)
"""
return _np.sqrt(_np.cos(th)**2 + (rf*_np.sin(th))**2)
def rt(th, wfrac):
    """ Roche radius (normalized to Rpole) as a function of wfrac.
:param th: theta, in radians (0 = pole; pi/2 = equator).
Based on Mc.Gill(?) and J. Patrick Harrington (notes) formula:
``r = 3/wfrac/np.sin(th)*np.cos(1/3.*(np.pi+np.arccos(wfrac*np.sin(th))))``
"""
if wfrac == 0:
wfrac = 1e-9
if th == 0:
r = 1.
else:
r = (-3. * _np.cos((_np.arccos(wfrac * _np.sin(th)) + 4 *
_np.pi) / 3)) / (wfrac * _np.sin(th))
return r
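# Shape sketch: the equatorial value of rt is the oblateness Req/Rp, e.g.
#     rt(_np.pi / 2, 0.96)    # ~1.30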
def rotStar(Tp=20000., M=10.3065, rp=5.38462, star='B', beta=0.25, wfrac=0.8,
th_res=5001, quiet=False, LnotTp=False):
""" Return the photospheric parameters of a rotating star.
``LnotTp``: the value of "Tp" is the Luminosity (in solar units).
    Calculation of von Zeipel's beta parameter as a function of W: see the ``beta`` function above.
INPUT: th_res (theta resolution, integer)...
OUTPUT: printed status + (ob, Tp values, Area[cm2])
"""
Rsun = _phc.Rsun.cgs
Msun = _phc.Msun.cgs
Lsun = _phc.Lsun.cgs
G = _phc.G.cgs
# AU = _phc.au.cgs
# pc = _phc.pc.cgs
sigma = _phc.sigma.cgs
M = M * Msun
rp = rp * Rsun
if wfrac == 0.:
wfrac = 1e-9
if LnotTp:
# Tp = (Tp * Lsun / 4. / _np.pi / rp**2 / sigma)**.25
Tp = (Tp*Lsun / sigma / sigma4b_cranmer(M/Msun, wfrac))**0.25 * \
(G*M / rp**2)**beta
# DEFS ###
# rh = outside
def area(wfrac):
ths = _np.linspace(_np.pi / 2, 0, th_res)
a = 0.
for i in range(len(ths)):
a = a + 2 * _np.pi * rt(ths[i], wfrac) ** 2 * _np.sin(ths[i])
return 2 * a * ths[-2]
def g(wfrac, M, rp, th):
wcrit = _np.sqrt(8 * G * M / (27 * rp ** 3))
g = (wcrit * wfrac) ** 2 * rp * rt(th, wfrac) * \
_np.sin(th) ** 2 - G * M / (rp * rt(th, wfrac)) ** 2
return g
def lum(wfrac, Tp, rp, M, C, beta):
ths = _np.linspace(_np.pi / 2, 0, th_res)
L = 0.
for i in range(len(ths)):
L = L + rt(ths[i], wfrac) ** 2 * _np.sin(ths[i]) * \
(abs(g(wfrac, M, rp, ths[i]))) ** (4 * beta)
return 2 * 2 * _np.pi * ths[-2] * sigma * rp ** 2 * C ** (4 * beta) * L
def lumf(wfrac, Tp, rp, M, beta):
ths = _np.linspace(_np.pi / 2, 0, th_res)
L = 0.
for i in range(len(ths)):
L = L + rt(ths[i], wfrac) ** 2 * _np.sin(ths[i]) * \
abs(g(wfrac, M, rp, ths[i])) ** (4 * beta)
return L * ths[-2] * rp ** 2
if star.startswith('B'):
Bstars = _np.array(bestarsHarm1988, dtype=str)
if star in Bstars:
i = _np.where(Bstars[:, 0] == star)
i = i[0][0]
print(Bstars[i][0])
Tp = float(Bstars[i][1])
M = float(Bstars[i][2]) * Msun
rp = float(Bstars[i][3]) * Rsun
            # comment the line below if it is the 1st run:
# Tp = 27438.63 #K
wcrit = _np.sqrt(8 * G * M / (27 * rp ** 3))
C = Tp ** (1. / beta) / abs(G * M / rp ** 2)
vrot = wcrit * wfrac * rp * rt(_np.pi / 2, wfrac)
lum0 = 4 * _np.pi * rp ** 2 * sigma * Tp ** 4 / Lsun
# a = rp**2*Tp**4*abs(g(wfrac,M,rp,0.))**(4*beta)
# print('Teff_pol* = %.2f' % ( (a/b)**beta ) )
b = lumf(wfrac, Tp, rp, M, beta)
c = lumf(0.0001, Tp, rp, M, beta)
Cw = (c / b) ** (1. / (4. * beta)) * C
ob = rt(_np.pi / 2, wfrac) # /(rp / Rsun)
# OUTPUT ###
if not quiet:
print('# Parameters:')
print('wfrac = %.4f' % (wfrac))
print('W = %.4f' % (_np.sqrt(2 * (ob - 1))))
print('Star Mass = %.2f Msun' % (M / Msun))
print('Rpole = %.2f Rsun' % (rp / Rsun))
print('Req = %.2f Rpole' % (rt(_np.pi / 2, wfrac)))
print('Teff_pol = %.1f' % (Tp))
print('Star Area = %.2f' % (area(wfrac)))
        print('Star Lum. = %.1f' % (lum(wfrac, Tp, rp, M, C, beta) / Lsun))
print('Star Lum.*= %.1f' % (lum0))
print('vrot(km/s)= %.1f' % (vrot / 1e5))
print('vorb(km/s)= %.1f' %
(_np.sqrt(G * M / rp / rt(_np.pi / 2, wfrac)) / 1e5) )
print('vcrt(km/s)= %.1f' % (wcrit * rp * rt(_np.pi / 2, 1.) / 1e5))
print('log(g)pole= %.2f' % (_np.log10(abs(g(wfrac, M, rp, 0.))) ))
print('log(g)eq = %.2f' %
(_np.log10(abs(g(wfrac, M, rp, _np.pi / 2))) ))
print('Teff_eq = %.1f' %
( (C * abs(g(wfrac, M, rp, _np.pi / 2))) ** beta) )
print('Teff_eq* = %.1f' %
( (Cw * abs(g(wfrac, M, rp, _np.pi / 2))) ** beta) )
print('Teff_pol* = %.2f' % ( (Cw * abs(g(wfrac, M, rp, 0.))) ** beta) )
print('T_pol/eq* = %.4f' % ((Cw * abs(g(wfrac, M, rp, 0.))) ** beta /
(Cw * abs(g(wfrac, M, rp, _np.pi / 2))) ** beta) )
print('# \"*\" == case where L is constant!')
return ob, (Cw * abs(g(wfrac, M, rp, 0.))) ** beta, area(wfrac) * (rp**2)
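# Example call (B2-type parameters taken from the bestarsHarm1988 table
# below; the wfrac value is assumed):
#     ob, Tpole, area_cm2 = rotStar(Tp=23121., M=8.62, rp=4.28,
#                                   beta=0.25, wfrac=0.8, quiet=True)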
def rochearea(wfrac, isW=False):
""" Calculate the Roche area of a rigid rotator.
Equation 4.23 from Cranmer 1996 (thesis).
    Area in units of the squared radius (multiply by Rpole**2 to obtain a
    physical size).
"""
if isW:
w = wfrac_rot(wfrac)
else:
w = wfrac
    return 4*_np.pi*(1+.19444*w**2+0.28053*w**4-1.9014*w**6+6.8298*w**8-
9.502*w**10+4.6631*w**12)
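# Sketch of a physical surface area (the stellar radius value is assumed):
#     A_cm2 = rochearea(0.8) * (4.28 * _phc.Rsun.cgs)**2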
def sigma4b_cranmer(M, w):
'''Computes sigma4b defined in Cranmer 1996 (Eq. 4.22)
Usage:
s4b = sigma4b_cranmer(M, w)
where w=Omega/Omega_c, M=stellar mass in Msun (from 1.7 to 20.)
'''
dir0 = '{0}/refs/tables/'.format(_hdtpath())
tab = _np.load(dir0 + 'sigma4b_cranmer.npz')
s4b = _griddata(tab['parlist'], tab['sigma4b'], _np.array([M, w]),
method='linear')[0]
return s4b
bestarsHarm1988 = [
# The numbers below are based on Harmanec 1988
# B1.5 and B2.5 interpolated by Faes.
# Teff fixed: Rp2 from Lum1; Lum2 from Rp1.
# SpType Teff Mass Rp Lum '' Rp2 Lum2
['B0.0', 29854., 14.57, 05.80, 23948.8487173, 6.19, 27290.],
['B0.5', 28510., 13.19, 05.46, 17651.9502267, 5.80, 19953.],
['B1.0', 26182., 11.03, 04.91, 10152.9628687, 5.24, 11588.],
['B1.5', 24599., 09.72, 04.58, 6883.65832266, 4.87, 07768.],
['B2.0', 23121., 08.62, 04.28, 4691.72482578, 4.55, 05297.],
['B2.5', 20980., 07.18, 03.90, 2641.00783143, 4.11, 02931.],
['B3.0', 19055., 06.07, 03.56, 1497.45695726, 3.78, 01690.],
['B4.0', 17179., 05.12, 03.26, 829.555139678, 3.48, 00946.],
['B5.0', 15488., 04.36, 03.01, 467.232334920, 3.21, 00530.],
['B6.0', 14093., 03.80, 02.81, 279.154727515, 2.99, 00316.],
['B7.0', 12942., 03.38, 02.65, 176.569574061, 2.82, 00200.],
['B8.0', 11561., 02.91, 02.44, 95.3190701227, 2.61, 00109.],
['B9.0', 10351., 02.52, 02.25, 52.0850169839, 2.39, 0059.1]]
# ['B9.5', 09886., 02.38, 02.17, 00046., 2.32, 40.3107085348]]
bestarsSK1982 = [
    # Schmidt-Kaler 1982. Used (and interpolated) by Porter1996, Townsend2004,
# SpType Teff Mass Rp Lum
['B0.0', 30105., 17.5, 7.70, 43651.],
['B0.5', 27859., 14.6, 6.90, 25703.],
['B1.0', 25985., 12.5, 6.30, 16218.],
['B1.5', 24347., 10.8, 5.70, 10232.],
['B2.0', 22813., 09.6, 5.40, 07079.],
['B2.5', 21498., 08.6, 5.00, 04786.],
['B3.0', 20222., 07.7, 4.70, 03311.],
['B4.0', 18206., 06.4, 4.20, 01737.],
['B5.0', 16673., 05.5, 3.80, 01000.],
['B6.0', 15302., 04.8, 3.50, 00602.],
['B7.0', 14103., 04.2, 3.20, 00363.],
['B8.0', 13202., 03.8, 3.00, 00245.],
['B9.0', 12246., 03.4, 2.80, 00158.]]
bestarsdJN1987 = [
    # Derived by de Jager & Nieuwenhuijzen 1987 for the main sequence (b=5.)
    # lum class IV (b=4.); Used by Cranmer2005
    # Conclusion: 5 and 4 appear to be ZAMS and mid-MS; 3 late MS
    # Conclusion: SpTypes appear to be shifted by -1.0 here (cooler stars)
# SpType b-val Teff_V Mass_V Rp_5 Lum_V Teff_4 Mass_4 Rp_4 Lum_4
['B0.0', 1.200, 26841, 13.8, 6.58, 20134., 26911, 15.11, 7.84, 28919.],
['B0.5', 1.350, 24944, 11.4, 5.82, 11742., 24809, 12.30, 6.90, 16183.],
['B1.0', 1.500, 23213, 9.63, 5.16, 06917., 22915, 10.17, 6.11, 09222.],
['B1.5', 1.650, 21629, 8.17, 4.58, 04118., 21204, 08.54, 5.44, 05355.],
['B2.0', 1.800, 20178, 7.01, 4.08, 02478., 19655, 07.27, 4.87, 03171.],
['B2.5', 1.875, 19498, 6.51, 3.86, 01930., 18935, 06.74, 4.62, 02458.],
['B3.0', 1.950, 18846, 6.07, 3.65, 01508., 18250, 06.27, 4.39, 01915.],
['B4.0', 2.100, 17621, 5.31, 3.28, 00928., 16972, 05.48, 3.99, 01181.],
['B5.0', 2.250, 16493, 4.69, 2.95, 00578., 15810, 04.84, 3.64, 00743.],
['B6.0', 2.400, 15452, 4.18, 2.67, 00364., 14749, 04.33, 3.36, 00478.],
['B7.0', 2.550, 14491, 3.75, 2.42, 00232., 13780, 03.91, 3.12, 00314.],
['B8.0', 2.700, 13601, 3.40, 2.21, 00150., 12893, 03.57, 2.92, 00211.],
['B9.0', 2.850, 12778, 3.10, 2.03, 00098., 12080, 03.29, 2.76, 00145.]]
bestarsdJN1987_3 = [
    # Derived by de Jager & Nieuwenhuijzen 1987 for the main sequence (b=5.)
    # lum class IV (b=4.); Used by Cranmer2005
    # Conclusions with Geneva models: class III is still in the main sequence!
    # (but leaving, ~Achernar)
    # Conclusion: SpTypes appear to be shifted by -1 step here (cooler stars)
# SpType b-val Teff_3 Mass_3 Rp_3 Lum_3
['B0.0', 1.200, 25030, 14.8, 9.93, 34661.],
['B0.5', 1.350, 23009, 12.2, 8.92, 19969.],
['B1.0', 1.500, 21198, 10.2, 8.05, 11731.],
['B1.5', 1.650, 19570, 8.65, 7.31, 07032.],
['B2.0', 1.800, 18105, 7.43, 6.69, 04305.],
['B2.5', 1.875, 17427, 6.93, 6.41, 03396.],
['B3.0', 1.950, 16782, 6.48, 6.16, 02693.],
['B4.0', 2.100, 15586, 5.71, 5.71, 01723.],
['B5.0', 2.250, 14502, 5.10, 5.33, 01128.],
['B6.0', 2.400, 13519, 4.60, 5.03, 00756.],
['B7.0', 2.550, 12624, 4.20, 4.78, 00520.],
['B8.0', 2.700, 11809, 3.86, 4.58, 00366.],
['B9.0', 2.850, 11065, 3.59, 4.43, 00264.]]
bestarsBeAtlas = [
# H = 0.3 core
# For ob=1.10, i.e., one *CAN'T* apply 4*pi*R^2...
# SpType Tpole Teff Mass Rp Lum
['B0.0', _np.NaN, _np.NaN, _np.NaN, _np.NaN, _np.NaN],
['B0.5', 28905.8, 26765.7, 14.6, 7.50, 31183.26],
['B1.0', 26945.8, 24950.9, 12.5, 6.82, 19471.38],
['B1.5', 25085.2, 23228.2, 10.8, 6.23, 12204.70],
['B2.0', 23629.3, 21879.9, 09.6, 5.80, 08327.67],
['B2.5', 22296.1, 20645.4, 08.6, 5.43, 05785.96],
['B3.0', 20919.7, 19370.9, 07.7, 5.11, 03971.25],
['B4.0', 18739.3, 17351.9, 06.4, 4.62, 02090.08],
['B5.0', 17063.8, 15800.5, 05.5, 4.26, 01221.76],
['B6.0', 15587.7, 14433.6, 04.8, 4.02, 00757.60],
['B7.0', 14300.3, 13241.6, 04.2, 3.72, 00459.55],
['B8.0', 13329.9, 12343.0, 03.8, 3.55, 00315.96],
['B9.0', 12307.1, 11395.9, 03.4, 3.37, 00206.89]]
bestarsBeAtlas_N = [
# For ob=1.10
# SpType Tpole Teff Mass Rp Lum
['B0.0', 28905.8, 26765.7, 14.6, 7.50, 31183.26],
['B0.5', 26945.8, 24950.9, 12.5, 6.82, 19471.38],
['B1.0', 25085.2, 23228.2, 10.8, 6.23, 12204.70],
['B1.5', 23629.3, 21879.9, 09.6, 5.80, 08327.67],
['B2.0', 22296.1, 20645.4, 08.6, 5.43, 05785.96],
['B2.5', 20919.7, 19370.9, 07.7, 5.11, 03971.25],
['B3.0', 18739.3, 17351.9, 06.4, 4.62, 02090.08],
['B4.0', 17063.8, 15800.5, 05.5, 4.26, 01221.76],
['B5.0', 15587.7, 14433.6, 04.8, 4.02, 00757.60],
['B6.0', 14300.3, 13241.6, 04.2, 3.72, 00459.55],
['B7.0', 13329.9, 12343.0, 03.8, 3.55, 00315.96],
['B8.0', 12307.1, 11395.9, 03.4, 3.37, 00206.89],
['B9.0', _np.NaN, _np.NaN, _np.NaN, _np.NaN, _np.NaN]]
def oblat2w(oblat):
'''
Converts oblateness into wc=Omega/Omega_crit
Ekstrom et al. 2008, Eq. 9
Usage:
w = oblat2w(oblat)
'''
w = (1.5**1.5) * _np.sqrt(2.*(oblat - 1.) / oblat**3.)
return w
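# Quick check (exact within the rigid Roche geometry):
#     oblat2w(1.5)    # = 1.0, i.e. critical rotation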
def geneva_closest(Mstar, oblat, t, Zstr='014', tar=None, silent=True):
'''
Interpolate models between rotation rates, at closest Mstar.
Usage:
    Rpole, logL, age = geneva_closest(Mstar, oblat, t, Zstr='014', tar=None,
silent=True)
where t is given in tMS, and tar is the open tar file. The chosen
metallicity is according to the input tar file. If tar=None, the
code will take Zstr='014' by default.
'''
# oblat to Omega/Omega_c
w = oblat2w(oblat)
# grid
if Mstar <= 20.:
Mlist = _np.array([1.7, 2., 2.5, 3., 4., 5., 7., 9., 12., 15., 20.])
Mstr = _np.array(['1p700', '2p000', '2p500', '3p000', '4p000', '5p000',
'7p000', '9p000', '12p00', '15p00', '20p00'])
Vlist = _np.array([0., 0.1, 0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95])
Vstr = _np.array(['00000', '10000', '30000', '50000', '60000', '70000',
'80000', '90000', '95000'])
else:
Mlist = _np.array([20., 25., 32., 40., 60., 85., 120.])
Mstr = _np.array(['20p00', '25p00', '32p00', '40p00', '60p00', '85p00',
'120p0'])
Vlist = _np.array([0., 0.568])
Vstr = _np.array(['00000', '56800'])
# read tar file
if tar is None:
dir0 = '{0}/refs/geneva_models/'.format(_hdtpath())
fmod = 'Z{:}.tar.gz'.format(Zstr)
tar = _tarfile.open(dir0 + fmod, 'r:gz')
else:
Zstr = tar.getnames()[0][7:10]
# find closest Mstar
iM = _np.where(_np.abs(Mstar-Mlist) == _np.min(_np.abs(Mstar-Mlist)))[0][0]
# find values at selected age
nw = len(Vlist)
wlist = _np.zeros(nw)
Rplist = _np.zeros(nw)
logLlist = _np.zeros(nw)
agelist = _np.zeros(nw)
for iw, vs in enumerate(Vstr):
fname = 'M{:}Z{:}00V{:}.dat'.format(Mstr[iM], Zstr, vs)
age1, _, logL1, _, Hfrac1, _, _, w1, Rpole1 = geneva_read(fname,
tar=tar)
t1 = age1 / age1[_np.where(Hfrac1 == 0.)[0][0]-1]
if t > t1.max() and not silent:
print('[geneva_closest] Warning: requested age not available, '
'taking t/tMS={:.2f} instead of t/tMS={:.2f}.'.format(
t1.max(), t))
it = _np.where(_np.abs(t-t1) == _np.min(_np.abs(t-t1)))[0][0]
wlist[iw] = w1[it]
Rplist[iw] = Rpole1[it]
logLlist[iw] = logL1[it]
agelist[iw] = age1[it] / 1e6
# interpolate between rotation rates
if w <= wlist.max():
Rpole = _griddata(wlist, Rplist, [w], method='linear')[0]
logL = _griddata(wlist, logLlist, [w], method='linear')[0]
age = _griddata(wlist, agelist, [w], method='linear')[0]
else:
if not silent:
print('[geneva_closest] Warning: no model rotating this fast at '
'this age, taking closest model instead. (omega={:.2f} '
'instead of omega={:.2f})'.format(wlist.max(), w))
iwmax = _np.where(wlist == wlist.max())[0][0]
Rpole = Rplist[iwmax]
logL = logLlist[iwmax]
age = agelist[iwmax]
return Rpole, logL, age
def geneva_interp(Mstar, oblat, t, Zstr='014', tar=None, silent=True):
'''
Interpolates Geneva stellar models.
Usage:
Rpole, logL, age = geneva_interp(Mstar, oblat, t, tar=None, silent=True)
where t is given in tMS, and tar is the open tar file. The chosen
metallicity is according to the input tar file. If tar=None, the
code will take Zstr='014' by default.
'''
# oblat to Omega/Omega_c
# w = oblat2w(oblat)
# grid
if Mstar <= 20.:
Mlist = _np.array([1.7, 2., 2.5, 3., 4., 5., 7., 9., 12., 15., 20.])
else:
Mlist = _np.array([20., 25., 32., 40., 60., 85., 120.])
# read tar file
if tar is None:
dir0 = '{0}/refs/geneva_models/'.format(_hdtpath())
fmod = 'Z{:}.tar.gz'.format(Zstr)
tar = _tarfile.open(dir0 + fmod, 'r:gz')
else:
Zstr = tar.getnames()[0][7:10]
# interpolation
if (Mstar >= Mlist.min()) * (Mstar <= Mlist.max()):
if (Mstar == Mlist).any():
Rpole, logL, age = geneva_closest(Mstar, oblat, t, tar=tar,
Zstr=Zstr, silent=silent)
else:
# nearest value at left
Mleft = Mlist[Mlist < Mstar]
Mleft = Mleft[_np.abs(Mleft - Mstar).argmin()]
iMleft = _np.where(Mlist == Mleft)[0][0]
Rpolel, logLl, agel = geneva_closest(Mlist[iMleft], oblat, t,
tar=tar, Zstr=Zstr, silent=silent)
# nearest value at right
Mright = Mlist[Mlist > Mstar]
Mright = Mright[_np.abs(Mright - Mstar).argmin()]
iMright = _np.where(Mlist == Mright)[0][0]
Rpoler, logLr, ager = geneva_closest(Mlist[iMright], oblat, t,
tar=tar, Zstr=Zstr, silent=silent)
# interpolate between masses
weight = _np.array([Mright-Mstar, Mstar-Mleft]) / (Mright-Mleft)
Rpole = weight.dot(_np.array([Rpolel, Rpoler]))
logL = weight.dot(_np.array([logLl, logLr]))
age = weight.dot(_np.array([agel, ager]))
else:
if not silent:
print('[geneva_interp] Warning: Mstar out of available range, '
'taking the closest value.')
Rpole, logL, age = geneva_closest(Mstar, oblat, t, tar=tar, Zstr=Zstr,
silent=silent)
return Rpole, logL, age
def geneva_interp_fast(Mstar, oblat, t, Zstr='014', silent=True):
'''
Interpolates Geneva stellar models, from grid of
pre-computed interpolations.
Usage:
Rpole, logL, age = geneva_interp_fast(Mstar, oblat, t, Zstr='014')
    where t is given in tMS. For now, only Zstr='014' is available.
'''
# read grid
dir0 = '{0}/refs/geneva_models/'.format(_hdtpath())
if Mstar <= 20.:
fname = 'geneva_interp_Z{:}.npz'.format(Zstr)
else:
fname = 'geneva_interp_Z{:}_highM.npz'.format(Zstr)
data = _np.load(dir0 + fname)
Mstar_arr = data['Mstar_arr']
oblat_arr = data['oblat_arr']
t_arr = data['t_arr']
Rpole_grid = data['Rpole_grid']
logL_grid = data['logL_grid']
age_grid = data['age_grid']
# build grid of parameters
par_grid = []
for M in Mstar_arr:
for ob in oblat_arr:
for tt in t_arr:
par_grid.append([M, ob, tt])
par_grid = _np.array(par_grid)
# set input/output parameters
par = _np.array([Mstar, oblat, t])
# set ranges
ranges = _np.array([[par_grid[:, i].min(), par_grid[:, i].max()] for i in
range(len(par))])
# find neighbours
keep, out, inside_ranges, par, par_grid = _phc.find_neighbours(par,
par_grid, ranges)
# interpolation method
if inside_ranges:
interp_method = 'linear'
else:
if not silent:
print('[geneva_interp_fast] Warning: parameters out of available '
'range, taking closest model')
interp_method = 'nearest'
if len(keep[keep]) == 1:
# coincidence
Rpole = Rpole_grid.flatten()[keep][0]
logL = logL_grid.flatten()[keep][0]
age = age_grid.flatten()[keep][0]
else:
# interpolation
Rpole = _griddata(par_grid[keep], Rpole_grid.flatten()[keep], par,
method=interp_method, rescale=True)[0]
logL = _griddata(par_grid[keep], logL_grid.flatten()[keep], par,
method=interp_method, rescale=True)[0]
age = _griddata(par_grid[keep], age_grid.flatten()[keep], par,
method=interp_method, rescale=True)[0]
return Rpole, logL, age
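# Usage sketch for the fast path (editorial addition): same inputs as
# `geneva_interp`, but the values come from the pre-computed .npz grid, so
# that grid file must already exist (see `geneva_pre_computed` below). The
# numeric inputs are hypothetical.
def _geneva_interp_fast_example():
    Rpole, logL, age = geneva_interp_fast(4.2, 1.3, 0.5, Zstr='014')
    return Rpole, logL, age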
def geneva_pre_computed(Zstr='014', silent=False):
'''
    Creates the Geneva pre-computed interpolation grids (regular and
    high-mass) and saves them as .npz files.
'''
dir0 = '{0}/refs/geneva_models/'.format(_hdtpath())
    # skip if the regular-mass grid was already computed
    if not _os.path.exists(dir0 + 'geneva_interp_Z{:}.npz'.format(Zstr)):
# par grid
Mstar_arr = _np.array(
[1.7, 2., 2.5, 3., 4., 5., 7., 9., 12., 15., 20.])
oblat_arr = _np.linspace(1., 1.5, 6)
t_arr = _np.hstack([_np.linspace(0., .9, 10),
_np.linspace(1., 1.1, 21)])
Rpole_grid = _np.zeros([len(Mstar_arr), len(oblat_arr), len(t_arr)])
logL_grid = _np.zeros([len(Mstar_arr), len(oblat_arr), len(t_arr)])
age_grid = _np.zeros([len(Mstar_arr), len(oblat_arr), len(t_arr)])
# read tar file
tar = _tarfile.open(dir0 + 'Z{:}.tar.gz'.format(Zstr), 'r:gz')
for iM, Mstar in enumerate(Mstar_arr):
for iob, oblat in enumerate(oblat_arr):
for it, t in enumerate(t_arr):
if not silent:
print(Mstar, oblat, t)
Rp, lL, age = geneva_interp(Mstar, oblat, t, tar=tar,
Zstr=Zstr, silent=silent)
Rpole_grid[iM, iob, it] = Rp
logL_grid[iM, iob, it] = lL
age_grid[iM, iob, it] = age
_np.savez(dir0 + 'geneva_interp_Z{:}'.format(Zstr),
Mstar_arr=Mstar_arr, oblat_arr=oblat_arr, t_arr=t_arr,
Rpole_grid=Rpole_grid, logL_grid=logL_grid, age_grid=age_grid)
# high M
    # skip if the high-mass grid was already computed
    if _os.path.exists(dir0 + 'geneva_interp_Z{:}_highM.npz'.format(Zstr)):
        return
# par grid
Mstar_arr = _np.array([20., 25., 32., 40., 60., 85., 120.])
oblat_arr = _np.linspace(1., 1.05633802817, 2)
t_arr = _np.hstack([_np.linspace(0., .9, 10),
_np.linspace(1., 1.1, 21)])
Rpole_grid = _np.zeros([len(Mstar_arr), len(oblat_arr), len(t_arr)])
logL_grid = _np.zeros([len(Mstar_arr), len(oblat_arr), len(t_arr)])
age_grid = _np.zeros([len(Mstar_arr), len(oblat_arr), len(t_arr)])
# read tar file
tar = _tarfile.open(dir0 + 'Z{:}.tar.gz'.format(Zstr), 'r:gz')
for iM, Mstar in enumerate(Mstar_arr):
for iob, oblat in enumerate(oblat_arr):
for it, t in enumerate(t_arr):
if not silent:
print(Mstar, oblat, t)
Rp, lL, age = geneva_interp(Mstar, oblat, t, tar=tar,
Zstr=Zstr, silent=silent)
Rpole_grid[iM, iob, it] = Rp
logL_grid[iM, iob, it] = lL
age_grid[iM, iob, it] = age
_np.savez(dir0 + 'geneva_interp_Z{:}_highM'.format(Zstr),
Mstar_arr=Mstar_arr, oblat_arr=oblat_arr, t_arr=t_arr,
Rpole_grid=Rpole_grid, logL_grid=logL_grid, age_grid=age_grid)
return
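# Usage sketch (editorial addition): the grids only need to be generated
# once per metallicity; the call is slow and requires the Geneva tarballs
# under refs/geneva_models/.
def _geneva_pre_computed_example():
    geneva_pre_computed(Zstr='014', silent=True)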
def geneva_read(fname, Zstr='014', tar=None):
'''
Reads Geneva model file
Usage:
age, Mstar, logL, logTeff, Hfrac, Hefrac, oblat, w, Rpole =
geneva_read(fname, tar=None)
where tar is the read tar(.gz) opened file.
'''
# read tar file
if tar is None:
dir0 = '{0}/refs/geneva_models/'.format(_hdtpath())
fmod = 'Z{:}.tar.gz'.format(Zstr)
tar = _tarfile.open(dir0 + fmod, 'r:gz')
else:
Zstr = tar.getnames()[0][7:10]
m = tar.getmember(fname)
fname = tar.extractfile(m)
# (age, M, logL, logTeff, Hfrac, Hefrac, oblat, w, Rpole)
cols = (1, 2, 3, 4, 21, 22, 34, 39, 44)
t = _np.loadtxt(fname, usecols=cols, skiprows=2)
age = t[:, 0]
Mstar = t[:, 1]
logL = t[:, 2]
logTeff = t[:, 3]
Hfrac = t[:, 4]
Hefrac = t[:, 5]
oblat = 1./t[:, 6]
w = t[:, 7]
Rpole = t[:, 8]
return age, Mstar, logL, logTeff, Hfrac, Hefrac, oblat, w, Rpole
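# Usage sketch (editorial addition): reading a single evolutionary track
# directly. The file name below is hypothetical and must exist inside the
# Z014 tarball.
def _geneva_read_example():
    (age, Mstar, logL, logTeff, Hfrac, Hefrac, oblat, w,
        Rpole) = geneva_read('M4p000Z01400V00000.dat')
    return age, Rpole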
# MAIN ###
if __name__ == "__main__":
pass
| gpl-3.0 | 1,490,024,845,966,270,700 | 34.874378 | 80 | 0.519745 | false |
axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/errors.py | 1 | 1963 | # -*- coding: utf-8 -*-
"""
sphinx.errors
~~~~~~~~~~~~~
Contains SphinxError and a few subclasses (in an extra module to avoid
circular import problems).
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class SphinxError(Exception):
"""
Base class for Sphinx errors that are shown to the user in a nicer
way than normal exceptions.
"""
category = 'Sphinx error'
class SphinxWarning(SphinxError):
"""Raised for warnings if warnings are treated as errors."""
category = 'Warning, treated as error'
class ExtensionError(SphinxError):
"""Raised if something's wrong with the configuration."""
category = 'Extension error'
def __init__(self, message, orig_exc=None):
SphinxError.__init__(self, message)
self.orig_exc = orig_exc
def __repr__(self):
if self.orig_exc:
return '%s(%r, %r)' % (self.__class__.__name__,
self.message, self.orig_exc)
return '%s(%r)' % (self.__class__.__name__, self.message)
def __str__(self):
parent_str = SphinxError.__str__(self)
if self.orig_exc:
return '%s (exception: %s)' % (parent_str, self.orig_exc)
return parent_str
class ConfigError(SphinxError):
category = 'Configuration error'
class ThemeError(SphinxError):
category = 'Theme error'
class VersionRequirementError(SphinxError):
category = 'Sphinx version error'
class PycodeError(Exception):
def __str__(self):
res = self.args[0]
if len(self.args) > 1:
res += ' (exception was: %r)' % self.args[1]
return res
class SphinxParallelError(SphinxError):
category = 'Sphinx parallel build error'
def __init__(self, message, traceback):
self.message = message
self.traceback = traceback
def __str__(self):
return self.message
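# Illustrative sketch (editorial addition, not part of Sphinx): wrapping a
# failure in ExtensionError preserves the original exception on `orig_exc`,
# which __repr__ and __str__ then include in the error report.
def _extension_error_example():
    try:
        raise ValueError('boom')
    except ValueError as exc:
        raise ExtensionError('Could not set up extension', orig_exc=exc)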
| apache-2.0 | 1,207,145,405,419,724,500 | 24.493506 | 74 | 0.608253 | false |
praekeltfoundation/seaworthy | seaworthy/tests-core/test_client.py | 1 | 13579 | import unittest
import requests.exceptions
import responses
from seaworthy.checks import docker_client, dockertest
from seaworthy.client import ContainerHttpClient, wait_for_response
from seaworthy.definitions import ContainerDefinition
from seaworthy.helpers import DockerHelper, fetch_images
# Small (<4MB) image that echoes HTTP requests and runs without configuration
IMG = 'jmalloc/echo-server'
@dockertest()
def setUpModule(): # noqa: N802 (The camelCase is mandated by unittest.)
with docker_client() as client:
fetch_images(client, [IMG])
def echo_container(name, **kw):
kw.setdefault('wait_patterns', ('Echo server listening on port 8080.',))
return ContainerDefinition(name, IMG, **kw)
class DummySession:
def __init__(self):
self.requests = []
self.was_closed = False
def request(self, *args, **kwargs):
self.requests.append((args, kwargs))
def close(self):
self.was_closed = True
def check_was_closed(self):
was_closed, self.was_closed = self.was_closed, False
return was_closed
class TestContainerHttpClient(unittest.TestCase):
def make_helper(self):
dh = DockerHelper()
self.addCleanup(dh.teardown)
return dh.containers
@responses.activate
def test_defaults(self):
"""
When the container client is configured with a host address and port,
requests are made to that address and port.
"""
client = ContainerHttpClient('127.0.0.1', '12345')
responses.add(responses.GET, 'http://127.0.0.1:12345/', status=200)
response = client.request('GET')
self.assertEqual(response.status_code, 200)
[call] = responses.calls
self.assertEqual(call.request.url, 'http://127.0.0.1:12345/')
@responses.activate
def test_url_defaults(self):
"""
When the container client is configured with a host address and port,
and some URL defaults are set, requests are made to that address and
port with the expected URL.
"""
client = ContainerHttpClient('127.0.0.1', '12345', url_defaults={
'scheme': 'https',
'fragment': 'test',
})
responses.add(responses.GET, 'https://127.0.0.1:12345/baz', status=200)
response = client.request('GET', '/baz', url_kwargs={
'query': (('foo', 'bar'),),
})
self.assertEqual(response.status_code, 200)
[call] = responses.calls
self.assertEqual(
call.request.url, 'https://127.0.0.1:12345/baz?foo=bar#test')
@responses.activate
def test_paths(self):
"""
The path is appended to the URL correctly with various leading or
trailing ``/`` characters.
"""
client = ContainerHttpClient('127.0.0.1', '12345')
# Root path
responses.add(responses.GET, 'http://127.0.0.1:12345/', status=200)
client.request('GET', '') # Requests adds a trailing /
client.request('GET', '/')
self.assertEqual(
responses.calls[0].request.url, 'http://127.0.0.1:12345/')
self.assertEqual(
responses.calls[1].request.url, 'http://127.0.0.1:12345/')
# Leading slashes are ignored
responses.add(
responses.GET, 'http://127.0.0.1:12345/a/b/c', status=200)
client.request('GET', '/a/b/c')
client.request('GET', 'a/b/c')
self.assertEqual(
responses.calls[2].request.url, 'http://127.0.0.1:12345/a/b/c')
self.assertEqual(
responses.calls[3].request.url, 'http://127.0.0.1:12345/a/b/c')
# Trailing slashes are respected
responses.add(
responses.GET, 'http://127.0.0.1:12345/a/b/c/', status=200)
client.request('GET', '/a/b/c/')
self.assertEqual(
responses.calls[4].request.url, 'http://127.0.0.1:12345/a/b/c/')
# Double slashes are not ignored
responses.add(
responses.GET, 'http://127.0.0.1:12345//a//b', status=200)
client.request('GET', '//a//b')
self.assertEqual(
responses.calls[5].request.url, 'http://127.0.0.1:12345//a//b')
@responses.activate
def test_relative_paths(self):
"""
The path can be specified as a relative or absolute path.
"""
client = ContainerHttpClient(
'127.0.0.1', '12345', url_defaults={'path': ['foo']})
responses.add(
responses.GET, 'http://127.0.0.1:12345/foo/bar/baz', status=200)
client.request('GET', 'bar/baz')
self.assertEqual(responses.calls[0].request.url,
'http://127.0.0.1:12345/foo/bar/baz')
responses.add(
responses.GET, 'http://127.0.0.1:12345/foobar', status=200)
client.request('GET', '/foobar')
self.assertEqual(responses.calls[1].request.url,
'http://127.0.0.1:12345/foobar')
@responses.activate
def test_methods(self):
"""
When the HTTP method-specific methods are called, the correct request
method is used.
"""
client = ContainerHttpClient('127.0.0.1', '45678')
responses.add(responses.GET, 'http://127.0.0.1:45678/', status=200)
responses.add(
responses.OPTIONS, 'http://127.0.0.1:45678/foo', status=201)
responses.add(responses.HEAD, 'http://127.0.0.1:45678/bar', status=403)
responses.add(responses.POST, 'http://127.0.0.1:45678/baz', status=404)
responses.add(responses.PUT, 'http://127.0.0.1:45678/test', status=418)
responses.add(
responses.PATCH, 'http://127.0.0.1:45678/a/b/c', status=501)
responses.add(
responses.DELETE, 'http://127.0.0.1:45678/d/e/f', status=503)
get_response = client.get()
options_response = client.options('/foo')
head_response = client.head('/bar')
post_response = client.post('/baz')
put_response = client.put('/test')
patch_response = client.patch('/a/b/c')
delete_response = client.delete('/d/e/f')
self.assertEqual(get_response.status_code, 200)
self.assertEqual(options_response.status_code, 201)
self.assertEqual(head_response.status_code, 403)
self.assertEqual(post_response.status_code, 404)
self.assertEqual(put_response.status_code, 418)
self.assertEqual(patch_response.status_code, 501)
self.assertEqual(delete_response.status_code, 503)
self.assertEqual(len(responses.calls), 7)
def test_session(self):
"""
When a custom session object is given, that object is used to make
requests and is closed when ``close()`` is called.
"""
session = DummySession()
client = ContainerHttpClient('127.0.0.1', '12345', session=session)
client.request('GET', '/foo')
client.request('POST', '/bar')
self.assertEqual(session.requests, [
(('GET', 'http://127.0.0.1:12345/foo'), {}),
(('POST', 'http://127.0.0.1:12345/bar'), {}),
])
client.close()
self.assertTrue(session.check_was_closed())
def test_session_context_manager(self):
"""
When a custom session object is given, that object is used to make
requests and is closed when the context is exited when the container
client is used as a context manager.
"""
session = DummySession()
client = ContainerHttpClient('127.0.0.1', '12345', session=session)
with client:
client.request('GET', '/foo')
self.assertEqual(session.requests, [
(('GET', 'http://127.0.0.1:12345/foo'), {}),
])
self.assertTrue(session.check_was_closed())
@dockertest()
def test_for_container_first_port(self):
"""
The ``for_container()`` class method returns a container client that
connects to the container's first port when a specific port is not
specified.
"""
ch = self.make_helper()
container = echo_container('first_port', create_kwargs={
'ports': {'8080/tcp': ('127.0.0.1', None)}
}, helper=ch)
container.setup()
self.addCleanup(container.teardown)
client = ContainerHttpClient.for_container(container)
self.addCleanup(client.close)
response = client.request('GET', '/foo')
self.assertEqual(response.status_code, 200)
response_lines = response.text.splitlines()
self.assertIn('HTTP/1.1 GET /foo', response_lines)
addr, port = container.get_first_host_port()
self.assertIn('Host: {}:{}'.format(addr, port), response_lines)
@dockertest()
def test_for_container_specific_port(self):
"""
The ``for_container()`` class method returns a container client that
connects to the container port specified.
"""
ch = self.make_helper()
        container = echo_container('specific_port', create_kwargs={
'ports': {
'8080/tcp': ('127.0.0.1', None),
'5353/udp': ('127.0.0.1', None),
}
}, helper=ch)
container.setup()
self.addCleanup(container.teardown)
client = ContainerHttpClient.for_container(
container, container_port='8080')
self.addCleanup(client.close)
response = client.request('GET', '/foo')
self.assertEqual(response.status_code, 200)
response_lines = response.text.splitlines()
self.assertIn('HTTP/1.1 GET /foo', response_lines)
addr, port = container.get_host_port('8080')
self.assertIn('Host: {}:{}'.format(addr, port), response_lines)
class TestWaitForResponseFunc(unittest.TestCase):
@responses.activate
def test_success(self):
"""
When a request succeeds before the timeout, all is happy.
"""
client = ContainerHttpClient('127.0.0.1', '12345')
responses.add(responses.GET, 'http://127.0.0.1:12345/', status=200)
# A failure here will raise an exception.
# 100ms is long enough for a first-time success.
wait_for_response(client, 0.1)
@responses.activate
def test_success_with_status_code(self):
"""
When a request succeeds before the timeout and has the expected status
code, all is happy.
"""
client = ContainerHttpClient('127.0.0.1', '12345')
responses.add(responses.GET, 'http://127.0.0.1:12345/', status=200)
# A failure here will raise an exception.
# 100ms is long enough for a first-time success.
wait_for_response(client, 0.1, expected_status_code=200)
@responses.activate
def test_error_then_success(self):
"""
When an exception is raised before the timeout, we retry and are happy
with any successful request before the timeout.
"""
client = ContainerHttpClient('127.0.0.1', '12345')
responses.add(
responses.GET, 'http://127.0.0.1:12345/', body=Exception('KABOOM'))
responses.add(responses.GET, 'http://127.0.0.1:12345/', status=200)
# A failure here will raise an exception.
        # Because responses is fast, 110ms gives us time to fail, wait 100ms,
        # then succeed.
wait_for_response(client, 0.11)
@responses.activate
def test_error_timeout(self):
"""
When exceptions are raised without a successful request before the
timeout, we time out.
"""
client = ContainerHttpClient('127.0.0.1', '12345')
responses.add(
responses.GET, 'http://127.0.0.1:12345/', body=Exception('KABOOM'))
with self.assertRaises(TimeoutError) as cm:
# 190ms is enough time to fail, wait 100ms, fail again, wait 100ms,
# then time out.
wait_for_response(client, 0.19)
self.assertEqual(
str(cm.exception), 'Timeout waiting for HTTP response.')
@responses.activate
def test_unexpected_status_code_timeout(self):
"""
When requests are received without the correct status code before the
timeout, we time out.
"""
client = ContainerHttpClient('127.0.0.1', '12345')
responses.add(
responses.GET, 'http://127.0.0.1:12345/', status=503)
with self.assertRaises(TimeoutError) as cm:
# 190ms is enough time to fail, wait 100ms, fail again, wait 100ms,
# then time out.
wait_for_response(client, 0.19, expected_status_code=200)
self.assertEqual(
str(cm.exception), 'Timeout waiting for HTTP response.')
@responses.activate
def test_timeout(self):
"""
When we don't get a response before the timeout, we time out.
FIXME: Because responses doesn't do timeouts, we fake it by manually
raising the exception we expect. We really should use requests itself
for this.
"""
client = ContainerHttpClient('127.0.0.1', '12345')
responses.add(
responses.GET, 'http://127.0.0.1:12345/',
body=requests.exceptions.Timeout())
with self.assertRaises(TimeoutError) as cm:
# The timeout doesn't actually matter here because we raise the
# exception ourselves.
wait_for_response(client, 0.1)
self.assertEqual(
str(cm.exception), 'Timeout waiting for HTTP response.')
| bsd-3-clause | 8,351,924,333,596,911,000 | 35.307487 | 79 | 0.600707 | false |
pombredanne/jusText | tests/test_dom_utils.py | 1 | 5073 | # -*- coding: utf8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import unittest
from nose import tools
from lxml import html
from justext.core import preprocessor, html_to_dom
class TestDomUtils(unittest.TestCase):
def test_remove_comments(self):
dom = html.fromstring(
'<html><!-- comment --><body>'
'<h1>Header</h1>'
'<!-- comment --> text'
'<p>footer'
'</body></html>'
)
expected = '<html><!-- comment --><body><h1>Header</h1><!-- comment --> text<p>footer</p></body></html>'
returned = html.tostring(dom).decode("utf8")
tools.assert_equal(expected, returned)
dom = preprocessor(dom)
expected = '<html><body><h1>Header</h1> text<p>footer</p></body></html>'
returned = html.tostring(dom).decode("utf8")
tools.assert_equal(expected, returned)
def test_remove_head_tag(self):
html_string = (
'<html><head><title>Title</title></head><body>'
'<h1>Header</h1>'
'<p><span>text</span></p>'
'<p>footer <em>like</em> a boss</p>'
'</body></html>'
)
dom = html.fromstring(html_string)
returned = html.tostring(dom).decode("utf8")
tools.assert_equal(html_string, returned)
dom = preprocessor(dom)
returned = html.tostring(dom).decode("utf8")
expected = (
'<html><body>'
'<h1>Header</h1>'
'<p><span>text</span></p>'
'<p>footer <em>like</em> a boss</p>'
'</body></html>'
)
tools.assert_equal(expected, returned)
def test_preprocess_simple_unicode_string(self):
html_string = (
'<html><head><title>Title</title></head><body>'
'<h1>Header</h1>'
'<p>pre<span>text</span>post<em>emph</em>popost</p>'
'<p>footer <em>like</em> a boss</p>'
'</body></html>'
)
dom = preprocessor(html_to_dom(html_string))
returned = html.tostring(dom).decode("utf8")
expected = (
'<html><body>'
'<h1>Header</h1>'
'<p>pre<span>text</span>post<em>emph</em>popost</p>'
'<p>footer <em>like</em> a boss</p>'
'</body></html>'
)
tools.assert_equal(expected, returned)
def test_preprocess_simple_bytes_string(self):
html_string = (
b'<html><head><title>Title</title></head><body>'
b'<h1>Header</h1>'
b'<p>pre<span>text</span>post<em>emph</em>popost</p>'
b'<p>footer <em>like</em> a boss</p>'
b' <!-- abcdefgh -->\n'
b'</body></html>'
)
dom = preprocessor(html_to_dom(html_string))
returned = html.tostring(dom).decode("utf8")
expected = (
'<html><body>'
'<h1>Header</h1>'
'<p>pre<span>text</span>post<em>emph</em>popost</p>'
'<p>footer <em>like</em> a boss</p>'
' \n'
'</body></html>'
)
tools.assert_equal(expected, returned)
def test_preprocess_simple_unicode_xhtml_string_with_declaration(self):
html_string = (
'<?xml version="1.0" encoding="windows-1250"?>'
'<!DOCTYPE html>'
'<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="sk" lang="sk">'
'<head>'
'<title>Hello World</title>'
'<meta http-equiv="imagetoolbar" content="no" />'
'<meta http-equiv="Content-Type" content="text/html; charset=windows-1250" />'
'</head>'
'<body id="index">'
'</body>'
'</html>'
)
dom = preprocessor(html_to_dom(html_string))
returned = html.tostring(dom).decode("utf8")
expected = (
'<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="sk" lang="sk">'
'<body id="index">'
'</body>'
'</html>'
)
tools.assert_equal(expected, returned)
def test_preprocess_simple_bytes_xhtml_string_with_declaration(self):
html_string = (
b'<?xml version="1.0" encoding="windows-1250"?>'
b'<!DOCTYPE html>'
b'<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="sk" lang="sk">'
b'<head>'
b'<title>Hello World</title>'
b'<meta http-equiv="imagetoolbar" content="no" />'
b'<meta http-equiv="Content-Type" content="text/html; charset=windows-1250" />'
b'</head>'
b'<body id="index">'
b'</body>'
b'</html>'
)
dom = preprocessor(html_to_dom(html_string))
returned = html.tostring(dom).decode("utf8")
expected = (
'<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="sk" lang="sk">'
'<body id="index">'
'</body>'
'</html>'
)
tools.assert_equal(expected, returned)
| bsd-2-clause | -4,193,157,643,998,283,300 | 33.510204 | 112 | 0.506998 | false |
okolisny/integration_tests | cfme/tests/test_rest.py | 1 | 32165 | # -*- coding: utf-8 -*-
"""This module contains REST API specific tests."""
import random
import pytest
import fauxfactory
from cfme import test_requirements
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.rest.gen_data import arbitration_rules as _arbitration_rules
from cfme.rest.gen_data import arbitration_settings as _arbitration_settings
from cfme.rest.gen_data import automation_requests_data
from cfme.rest.gen_data import vm as _vm
from fixtures.provider import setup_one_or_skip
from cfme.utils import error
from cfme.utils.blockers import BZ
from cfme.utils.providers import ProviderFilter
from cfme.utils.rest import assert_response
from cfme.utils.version import current_version
from cfme.utils.wait import wait_for, wait_for_decorator
pytestmark = [test_requirements.rest]
@pytest.fixture(scope="module")
def a_provider(request):
pf = ProviderFilter(classes=[VMwareProvider, RHEVMProvider])
return setup_one_or_skip(request, filters=[pf])
@pytest.fixture(scope='module')
def api_version(appliance):
entry_point = appliance.rest_api._versions.values()[0]
return appliance.new_rest_api_instance(entry_point=entry_point)
@pytest.fixture(scope="function")
def vm(request, a_provider, appliance):
return _vm(request, a_provider, appliance.rest_api)
def wait_for_requests(requests):
def _finished():
for request in requests:
request.reload()
if request.request_state != 'finished':
return False
return True
wait_for(_finished, num_sec=45, delay=5, message="requests finished")
@pytest.mark.tier(2)
@pytest.mark.parametrize(
"from_detail", [True, False],
ids=["from_detail", "from_collection"])
def test_vm_scan(appliance, vm, from_detail):
rest_vm = appliance.rest_api.collections.vms.get(name=vm)
if from_detail:
response = rest_vm.action.scan()
else:
response, = appliance.rest_api.collections.vms.action.scan(rest_vm)
assert_response(appliance)
@wait_for_decorator(timeout="5m", delay=5, message="REST running scanning vm finishes")
def _finished():
response.task.reload()
if response.task.status.lower() in {"error"}:
pytest.fail("Error when running scan vm method: `{}`".format(response.task.message))
return response.task.state.lower() == 'finished'
COLLECTIONS_ADDED_IN_58 = {
"actions", "alert_definitions", "alerts", "authentications", "configuration_script_payloads",
"configuration_script_sources", "load_balancers",
}
COLLECTIONS_REMOVED_IN_59 = {
"arbitration_settings", "arbitration_profiles", "virtual_templates", "arbitration_rules",
}
COLLECTIONS_ALL = {
"actions", "alert_definitions", "alerts", "arbitration_profiles",
"arbitration_rules", "arbitration_settings", "authentications", "automate",
"automate_domains", "automation_requests", "availability_zones",
"blueprints", "categories", "chargebacks", "cloud_networks", "clusters",
"conditions", "configuration_script_payloads",
"configuration_script_sources", "container_deployments", "currencies",
"data_stores", "events", "features", "flavors", "groups", "hosts",
"instances", "load_balancers", "measures", "notifications",
"orchestration_templates", "pictures", "policies", "policy_actions",
"policy_profiles", "providers", "provision_dialogs", "provision_requests",
"rates", "reports", "request_tasks", "requests", "resource_pools",
"results", "roles", "security_groups", "servers", "service_catalogs",
"service_dialogs", "service_orders", "service_requests",
"service_templates", "services", "settings", "tags", "tasks", "templates",
"tenants", "users", "virtual_templates", "vms", "zones"
}
# non-typical collections without "id" and "resources"
COLLECTIONS_OMMITED = {"settings"}
@pytest.mark.tier(3)
@pytest.mark.parametrize("collection_name", COLLECTIONS_ALL)
@pytest.mark.uncollectif(
lambda collection_name:
(collection_name in COLLECTIONS_OMMITED) or
(collection_name in COLLECTIONS_ADDED_IN_58 and current_version() < "5.8") or
(collection_name in COLLECTIONS_REMOVED_IN_59 and current_version() >= "5.9")
)
def test_query_simple_collections(appliance, collection_name):
"""This test tries to load each of the listed collections. 'Simple' collection means that they
have no usable actions that we could try to run
Steps:
* GET /api/<collection_name>
Metadata:
test_flag: rest
"""
collection = getattr(appliance.rest_api.collections, collection_name)
assert_response(appliance)
collection.reload()
list(collection)
@pytest.mark.tier(3)
@pytest.mark.parametrize("collection_name", COLLECTIONS_ALL)
@pytest.mark.uncollectif(
lambda collection_name:
(collection_name in COLLECTIONS_OMMITED) or
(collection_name in COLLECTIONS_ADDED_IN_58 and current_version() < "5.8") or
(collection_name in COLLECTIONS_REMOVED_IN_59 and current_version() >= "5.9")
)
def test_query_with_api_version(api_version, collection_name):
"""Loads each of the listed collections using /api/<version>/<collection>.
Steps:
* GET /api/<version>/<collection_name>
Metadata:
test_flag: rest
"""
collection = getattr(api_version.collections, collection_name)
assert_response(api_version)
collection.reload()
list(collection)
# collections affected by BZ 1437201 in versions < 5.9
COLLECTIONS_BUGGY_ATTRS = {"results", "service_catalogs", "automate", "categories", "roles"}
@pytest.mark.tier(3)
@pytest.mark.parametrize("collection_name", COLLECTIONS_ALL)
@pytest.mark.uncollectif(
lambda collection_name:
(collection_name in COLLECTIONS_ADDED_IN_58 and current_version() < "5.8") or
(collection_name in COLLECTIONS_REMOVED_IN_59 and current_version() >= "5.9")
)
@pytest.mark.meta(blockers=['GH#ManageIQ/manageiq:15754'])
def test_select_attributes(appliance, collection_name):
"""Tests that it's possible to limit returned attributes.
Metadata:
test_flag: rest
"""
if collection_name in COLLECTIONS_BUGGY_ATTRS and current_version() < '5.9':
pytest.skip("Affected by BZ 1437201, cannot test.")
collection = getattr(appliance.rest_api.collections, collection_name)
response = appliance.rest_api.get(
'{}{}'.format(collection._href, '?expand=resources&attributes=id'))
assert_response(appliance)
for resource in response.get('resources', []):
assert 'id' in resource
expected_len = 2 if 'href' in resource else 1
assert len(resource) == expected_len
def test_add_picture(appliance):
"""Tests adding picture.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.pictures
count = collection.count
collection.action.create({
"extension": "png",
"content": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcS"
"JAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="})
assert_response(appliance)
collection.reload()
assert collection.count == count + 1
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_add_picture_invalid_extension(appliance):
"""Tests adding picture with invalid extension.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.pictures
count = collection.count
with error.expected('Extension must be'):
collection.action.create({
"extension": "xcf",
"content": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcS"
"JAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="})
assert_response(appliance, http_status=400)
collection.reload()
assert collection.count == count
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_add_picture_invalid_data(appliance):
"""Tests adding picture with invalid content.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.pictures
count = collection.count
with error.expected('invalid base64'):
collection.action.create({
"extension": "png",
"content": "invalid"})
assert_response(appliance, http_status=400)
collection.reload()
assert collection.count == count
def test_http_options(appliance):
"""Tests OPTIONS http method.
Metadata:
test_flag: rest
"""
assert 'boot_time' in appliance.rest_api.collections.vms.options()['attributes']
assert_response(appliance)
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
@pytest.mark.parametrize("collection_name", ["hosts", "clusters"])
def test_http_options_node_types(appliance, collection_name):
"""Tests that OPTIONS http method on Hosts and Clusters collection returns node_types.
Metadata:
test_flag: rest
"""
collection = getattr(appliance.rest_api.collections, collection_name)
assert 'node_types' in collection.options()['data']
assert_response(appliance)
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_http_options_subcollections(appliance):
"""Tests that OPTIONS returns supported subcollections.
Metadata:
test_flag: rest
"""
assert 'tags' in appliance.rest_api.collections.vms.options()['subcollections']
assert_response(appliance)
def test_server_info(appliance):
"""Check that server info is present.
Metadata:
test_flag: rest
"""
assert all(item in appliance.rest_api.server_info for item in ('appliance', 'build', 'version'))
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_server_info_href(appliance):
"""Check that appliance's server, zone and region is present.
Metadata:
test_flag: rest
"""
items = ('server_href', 'zone_href', 'region_href')
for item in items:
assert item in appliance.rest_api.server_info
assert 'id' in appliance.rest_api.get(appliance.rest_api.server_info[item])
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_default_region(appliance):
"""Check that the default region is present.
Metadata:
test_flag: rest
"""
reg = appliance.rest_api.collections.regions[0]
assert hasattr(reg, 'guid')
assert hasattr(reg, 'region')
def test_product_info(appliance):
"""Check that product info is present.
Metadata:
test_flag: rest
"""
assert all(item in appliance.rest_api.product_info for item in
('copyright', 'name', 'name_full', 'support_website', 'support_website_text'))
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_settings_collection(appliance):
"""Checks that all expected info is present in /api/settings.
Metadata:
test_flag: rest
"""
# the "settings" collection is untypical as it doesn't have "resources" and
# for this reason can't be reloaded (bug in api client)
body = appliance.rest_api.get(appliance.rest_api.collections.settings._href)
assert all(item in body.keys() for item in ('product', 'prototype'))
def test_identity(appliance):
"""Check that user's identity is present.
Metadata:
test_flag: rest
"""
assert all(item in appliance.rest_api.identity for item in
('userid', 'name', 'group', 'role', 'tenant', 'groups'))
def test_user_settings(appliance):
"""Check that user's settings are returned.
Metadata:
test_flag: rest
"""
assert isinstance(appliance.rest_api.settings, dict)
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_datetime_filtering(appliance, a_provider):
"""Tests support for DateTime filtering with timestamps in YYYY-MM-DDTHH:MM:SSZ format.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.vms
url_string = '{}{}'.format(
collection._href,
'?expand=resources&attributes=created_on&sort_by=created_on&sort_order=asc'
'&filter[]=created_on{}{}')
vms_num = len(collection)
assert vms_num > 3
baseline_vm = collection[vms_num / 2]
baseline_datetime = baseline_vm._data['created_on'] # YYYY-MM-DDTHH:MM:SSZ
def _get_filtered_resources(operator):
return appliance.rest_api.get(url_string.format(operator, baseline_datetime))['resources']
older_resources = _get_filtered_resources('<')
newer_resources = _get_filtered_resources('>')
matching_resources = _get_filtered_resources('=')
# this will fail once BZ1437529 is fixed
# should be: ``assert matching_resources``
assert not matching_resources
if older_resources:
last_older = collection.get(id=older_resources[-1]['id'])
assert last_older.created_on < baseline_vm.created_on
if newer_resources:
first_newer = collection.get(id=newer_resources[0]['id'])
# this will fail once BZ1437529 is fixed
# should be: ``assert first_newer.created_on > baseline_vm.created_on``
assert first_newer.created_on == baseline_vm.created_on
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_date_filtering(appliance, a_provider):
"""Tests support for DateTime filtering with timestamps in YYYY-MM-DD format.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.vms
url_string = '{}{}'.format(
collection._href,
'?expand=resources&attributes=created_on&sort_by=created_on&sort_order=desc'
'&filter[]=created_on{}{}')
vms_num = len(collection)
assert vms_num > 3
baseline_vm = collection[vms_num / 2]
baseline_date, _ = baseline_vm._data['created_on'].split('T') # YYYY-MM-DD
def _get_filtered_resources(operator):
return appliance.rest_api.get(url_string.format(operator, baseline_date))['resources']
older_resources = _get_filtered_resources('<')
newer_resources = _get_filtered_resources('>')
matching_resources = _get_filtered_resources('=')
assert matching_resources
if newer_resources:
last_newer = collection.get(id=newer_resources[-1]['id'])
assert last_newer.created_on > baseline_vm.created_on
if older_resources:
first_older = collection.get(id=older_resources[0]['id'])
assert first_older.created_on < baseline_vm.created_on
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_resources_hiding(appliance):
"""Test that it's possible to hide resources in response.
Metadata:
test_flag: rest
"""
roles = appliance.rest_api.collections.roles
resources_visible = appliance.rest_api.get(roles._href + '?filter[]=read_only=true')
assert_response(appliance)
assert 'resources' in resources_visible
resources_hidden = appliance.rest_api.get(
roles._href + '?filter[]=read_only=true&hide=resources')
assert_response(appliance)
assert 'resources' not in resources_hidden
assert resources_hidden['subcount'] == resources_visible['subcount']
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_sorting_by_attributes(appliance):
"""Test that it's possible to sort resources by attributes.
Metadata:
test_flag: rest
"""
url_string = '{}{}'.format(
appliance.rest_api.collections.groups._href,
'?expand=resources&attributes=id&sort_by=id&sort_order={}')
response_asc = appliance.rest_api.get(url_string.format('asc'))
assert_response(appliance)
assert 'resources' in response_asc
response_desc = appliance.rest_api.get(url_string.format('desc'))
assert_response(appliance)
assert 'resources' in response_desc
assert response_asc['subcount'] == response_desc['subcount']
id_last = 0
for resource in response_asc['resources']:
assert resource['id'] > id_last
id_last = resource['id']
id_last += 1
for resource in response_desc['resources']:
assert resource['id'] < id_last
id_last = resource['id']
PAGING_DATA = [
(0, 0),
(1, 0),
(11, 13),
(1, 10000),
]
@pytest.mark.uncollectif(lambda: current_version() < '5.9')
@pytest.mark.parametrize(
'paging', PAGING_DATA, ids=['{},{}'.format(d[0], d[1]) for d in PAGING_DATA])
@pytest.mark.meta(blockers=[
BZ(1489885, forced_streams=['5.9', 'upstream'], unblock=lambda paging: paging[0] != 0),
])
def test_rest_paging(appliance, paging):
"""Tests paging when offset and limit are specified.
Metadata:
test_flag: rest
"""
limit, offset = paging
url_string = '{}{}'.format(
appliance.rest_api.collections.features._href,
'?limit={}&offset={}'.format(limit, offset))
response = appliance.rest_api.get(url_string)
if response['count'] <= offset:
expected_subcount = 0
elif response['count'] - offset >= limit:
expected_subcount = limit
else:
expected_subcount = response['count'] - offset
assert response['subcount'] == expected_subcount
assert len(response['resources']) == expected_subcount
expected_pages_num = (response['count'] / limit) + (1 if response['count'] % limit else 0)
assert response['pages'] == expected_pages_num
links = response['links']
assert 'limit={}&offset={}'.format(limit, offset) in links['self']
if (offset + limit) < response['count']:
assert 'limit={}&offset={}'.format(limit, offset + limit) in links['next']
if offset > 0:
expected_previous_offset = offset - limit if offset > limit else 0
assert 'limit={}&offset={}'.format(limit, expected_previous_offset) in links['previous']
assert 'limit={}&offset={}'.format(limit, 0) in links['first']
expected_last_offset = (response['pages'] - (1 if limit > 1 else 0)) * limit
assert 'limit={}&offset={}'.format(limit, expected_last_offset) in links['last']
COLLECTIONS_BUGGY_HREF_SLUG = {'policy_actions', 'automate_domains'}
@pytest.mark.tier(3)
@pytest.mark.parametrize("collection_name", COLLECTIONS_ALL)
@pytest.mark.uncollectif(
lambda collection_name:
collection_name == 'automate' or # doesn't have 'href'
(collection_name in COLLECTIONS_ADDED_IN_58 and current_version() < '5.8') or
(collection_name in COLLECTIONS_REMOVED_IN_59 and current_version() >= '5.9')
)
@pytest.mark.meta(blockers=[BZ(
1485310,
forced_streams=['5.8', 'upstream'],
unblock=lambda collection_name: collection_name not in COLLECTIONS_BUGGY_HREF_SLUG)])
def test_attributes_present(appliance, collection_name):
"""Tests that the expected attributes are present in all collections.
Metadata:
test_flag: rest
"""
attrs = 'href,id,href_slug'
collection = getattr(appliance.rest_api.collections, collection_name)
response = appliance.rest_api.get(
'{0}{1}{2}'.format(collection._href, '?expand=resources&attributes=', attrs))
assert_response(appliance)
for resource in response.get('resources', []):
assert 'id' in resource
assert 'href' in resource
assert resource['href'] == '{}/{}'.format(collection._href, resource['id'])
if current_version() >= '5.8':
assert 'href_slug' in resource
assert resource['href_slug'] == '{}/{}'.format(collection.name, resource['id'])
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
@pytest.mark.parametrize('vendor', ['Microsoft', 'Redhat', 'Vmware'])
def test_collection_class_valid(appliance, a_provider, vendor):
"""Tests that it's possible to query using collection_class.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.vms
resource_type = collection[0].type
tested_type = 'ManageIQ::Providers::{}::InfraManager::Vm'.format(vendor)
response = collection.query_string(collection_class=tested_type)
if resource_type == tested_type:
assert response.count > 0
# all returned entities must have the same type
if response.count:
rand_num = 5 if response.count >= 5 else response.count
rand_entities = random.sample(response, rand_num)
for entity in rand_entities:
assert entity.type == tested_type
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_collection_class_invalid(appliance):
"""Tests that it's not possible to query using invalid collection_class.
Metadata:
test_flag: rest
"""
with error.expected('Invalid collection_class'):
appliance.rest_api.collections.vms.query_string(
collection_class='ManageIQ::Providers::Nonexistent::Vm')
class TestBulkQueryRESTAPI(object):
def test_bulk_query(self, appliance):
"""Tests bulk query referencing resources by attributes id, href and guid
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.events
data0, data1, data2 = collection[0]._data, collection[1]._data, collection[2]._data
response = appliance.rest_api.collections.events.action.query(
{'id': data0['id']}, {'href': data1['href']}, {'guid': data2['guid']})
assert_response(appliance)
assert len(response) == 3
assert (data0 == response[0]._data and
data1 == response[1]._data and
data2 == response[2]._data)
def test_bulk_query_users(self, appliance):
"""Tests bulk query on 'users' collection
Metadata:
test_flag: rest
"""
data = appliance.rest_api.collections.users[0]._data
response = appliance.rest_api.collections.users.action.query(
{'name': data['name']}, {'userid': data['userid']})
assert_response(appliance)
assert len(response) == 2
assert data['id'] == response[0]._data['id'] == response[1]._data['id']
def test_bulk_query_roles(self, appliance):
"""Tests bulk query on 'roles' collection
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.roles
data0, data1 = collection[0]._data, collection[1]._data
response = appliance.rest_api.collections.roles.action.query(
{'name': data0['name']}, {'name': data1['name']})
assert_response(appliance)
assert len(response) == 2
assert data0 == response[0]._data and data1 == response[1]._data
def test_bulk_query_groups(self, appliance):
"""Tests bulk query on 'groups' collection
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.groups
data0, data1 = collection[0]._data, collection[1]._data
response = appliance.rest_api.collections.groups.action.query(
{'description': data0['description']}, {'description': data1['description']})
assert_response(appliance)
assert len(response) == 2
assert data0 == response[0]._data and data1 == response[1]._data
class TestArbitrationSettingsRESTAPI(object):
@pytest.fixture(scope='function')
def arbitration_settings(self, request, appliance):
num_settings = 2
response = _arbitration_settings(request, appliance.rest_api, num=num_settings)
assert_response(appliance)
assert len(response) == num_settings
return response
def test_create_arbitration_settings(self, appliance, arbitration_settings):
"""Tests create arbitration settings.
Metadata:
test_flag: rest
"""
for setting in arbitration_settings:
record = appliance.rest_api.collections.arbitration_settings.get(id=setting.id)
assert record._data == setting._data
@pytest.mark.parametrize('method', ['post', 'delete'])
def test_delete_arbitration_settings_from_detail(self, appliance, arbitration_settings, method):
"""Tests delete arbitration settings from detail.
Metadata:
test_flag: rest
"""
for setting in arbitration_settings:
setting.action.delete(force_method=method)
assert_response(appliance)
with error.expected('ActiveRecord::RecordNotFound'):
setting.action.delete(force_method=method)
assert_response(appliance, http_status=404)
def test_delete_arbitration_settings_from_collection(self, appliance, arbitration_settings):
"""Tests delete arbitration settings from collection.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.arbitration_settings
collection.action.delete(*arbitration_settings)
assert_response(appliance)
with error.expected('ActiveRecord::RecordNotFound'):
collection.action.delete(*arbitration_settings)
assert_response(appliance, http_status=404)
@pytest.mark.parametrize(
"from_detail", [True, False],
ids=["from_detail", "from_collection"])
def test_edit_arbitration_settings(self, appliance, arbitration_settings, from_detail):
"""Tests edit arbitration settings.
Metadata:
test_flag: rest
"""
num_settings = len(arbitration_settings)
uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_settings)]
new = [{'name': 'test_edit{}'.format(u), 'display_name': 'Test Edit{}'.format(u)}
for u in uniq]
if from_detail:
edited = []
for i in range(num_settings):
edited.append(arbitration_settings[i].action.edit(**new[i]))
assert_response(appliance)
else:
for i in range(num_settings):
new[i].update(arbitration_settings[i]._ref_repr())
edited = appliance.rest_api.collections.arbitration_settings.action.edit(*new)
assert_response(appliance)
assert len(edited) == num_settings
for i in range(num_settings):
assert (edited[i].name == new[i]['name'] and
edited[i].display_name == new[i]['display_name'])
class TestArbitrationRulesRESTAPI(object):
@pytest.fixture(scope='function')
def arbitration_rules(self, request, appliance):
num_rules = 2
response = _arbitration_rules(request, appliance.rest_api, num=num_rules)
assert_response(appliance)
assert len(response) == num_rules
return response
@pytest.mark.uncollectif(lambda: current_version() >= '5.9')
def test_create_arbitration_rules(self, arbitration_rules, appliance):
"""Tests create arbitration rules.
Metadata:
test_flag: rest
"""
for rule in arbitration_rules:
record = appliance.rest_api.collections.arbitration_rules.get(id=rule.id)
assert record.description == rule.description
# there's no test for the DELETE method as it is not working and won't be fixed, see BZ 1410504
@pytest.mark.uncollectif(lambda: current_version() >= '5.9')
def test_delete_arbitration_rules_from_detail_post(self, arbitration_rules, appliance):
"""Tests delete arbitration rules from detail.
Metadata:
test_flag: rest
"""
for entity in arbitration_rules:
entity.action.delete.POST()
assert_response(appliance)
with error.expected('ActiveRecord::RecordNotFound'):
entity.action.delete.POST()
assert_response(appliance, http_status=404)
@pytest.mark.uncollectif(lambda: current_version() >= '5.9')
def test_delete_arbitration_rules_from_collection(self, arbitration_rules, appliance):
"""Tests delete arbitration rules from collection.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.arbitration_rules
collection.action.delete(*arbitration_rules)
assert_response(appliance)
with error.expected('ActiveRecord::RecordNotFound'):
collection.action.delete(*arbitration_rules)
assert_response(appliance, http_status=404)
@pytest.mark.uncollectif(lambda: current_version() >= '5.9')
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_edit_arbitration_rules(self, arbitration_rules, appliance, from_detail):
"""Tests edit arbitration rules.
Metadata:
test_flag: rest
"""
num_rules = len(arbitration_rules)
uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_rules)]
new = [{'description': 'new test admin rule {}'.format(u)} for u in uniq]
if from_detail:
edited = []
for i in range(num_rules):
edited.append(arbitration_rules[i].action.edit(**new[i]))
assert_response(appliance)
else:
for i in range(num_rules):
new[i].update(arbitration_rules[i]._ref_repr())
edited = appliance.rest_api.collections.arbitration_rules.action.edit(*new)
assert_response(appliance)
assert len(edited) == num_rules
for i in range(num_rules):
assert edited[i].description == new[i]['description']
class TestNotificationsRESTAPI(object):
@pytest.fixture(scope='function')
def generate_notifications(self, appliance):
requests_data = automation_requests_data('nonexistent_vm')
requests = appliance.rest_api.collections.automation_requests.action.create(
*requests_data[:2])
assert len(requests) == 2
wait_for_requests(requests)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_mark_notifications(self, appliance, generate_notifications, from_detail):
"""Tests marking notifications as seen.
Metadata:
test_flag: rest
"""
unseen = appliance.rest_api.collections.notifications.find_by(seen=False)
notifications = [unseen[-i] for i in range(1, 3)]
if from_detail:
for ent in notifications:
ent.action.mark_as_seen()
assert_response(appliance)
else:
appliance.rest_api.collections.notifications.action.mark_as_seen(*notifications)
assert_response(appliance)
for ent in notifications:
ent.reload()
assert ent.seen
@pytest.mark.parametrize('method', ['post', 'delete'])
def test_delete_notifications_from_detail(self, appliance, generate_notifications, method):
"""Tests delete notifications from detail.
Metadata:
test_flag: rest
"""
if method == 'delete' and BZ('1420872', forced_streams=['5.7', '5.8', 'upstream']).blocks:
pytest.skip("Affected by BZ1420872, cannot test.")
collection = appliance.rest_api.collections.notifications
collection.reload()
notifications = [collection[-i] for i in range(1, 3)]
for entity in notifications:
entity.action.delete(force_method=method)
assert_response(appliance)
with error.expected('ActiveRecord::RecordNotFound'):
entity.action.delete(force_method=method)
assert_response(appliance, http_status=404)
def test_delete_notifications_from_collection(self, appliance, generate_notifications):
"""Tests delete notifications from collection.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.notifications
collection.reload()
notifications = [collection[-i] for i in range(1, 3)]
collection.action.delete(*notifications)
assert_response(appliance)
with error.expected("ActiveRecord::RecordNotFound"):
collection.action.delete(*notifications)
assert_response(appliance, http_status=404)
| gpl-2.0 | 4,843,326,511,060,586,000 | 36.357724 | 100 | 0.656801 | false |
bloem-project/bloem-server | files/models.py | 1 | 3838 | # -*- coding: utf-8 -*-
"""Model definitions for Bloem's files application.
This module defines the various models used as part of Bloem's files
application.
"""
import os
from django.db import models
class Directory(models.Model):
"""Defines the Directory model used in Bloem's files application.
Fields:
path (CharField): Path of the directory.
"""
path = models.CharField(max_length=4096)
class Meta:
verbose_name = "directory"
verbose_name_plural = "directories"
class Namespace(models.Model):
"""Defines the Namespace model used in Bloem's files application.
Fields:
name (CharField): Name of the namespace.
"""
name = models.CharField(unique=True, max_length=64)
class Meta:
verbose_name = "namespace"
verbose_name_plural = "namespaces"
class Tag(models.Model):
"""Defines the Tag model used in Bloem's files application.
Fields:
name (CharField): Name of the tag.
namespace (ForeignKey): Points to the namespace.
"""
name = models.CharField(unique=True, max_length=64)
namespace = models.ForeignKey(Namespace, on_delete=models.CASCADE)
class Meta:
verbose_name = "tag"
verbose_name_plural = "tags"
class File(models.Model):
"""Defines the File model used in Bloem's files application.
Fields:
hash (CharField): SHA256 hash of the file.
        file_name (CharField): Name of the file.
        directory (ForeignKey): Directory that contains the file.
        path (CharField): Absolute path of the file, excluding the actual
            filename.
        date_added (DateTimeField): Date and time when the file was added to
            the database.
        date_modified (DateTimeField): Date and time when the file was modified
            in the database.
        tags (ManyToManyField): Tags attached to the file.
"""
hash = models.CharField(max_length=64, unique=True)
file_name = models.CharField(max_length=256)
directory = models.ForeignKey(Directory, on_delete=models.CASCADE)
path = models.CharField(max_length=4096)
date_added = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
tags = models.ManyToManyField(Tag, blank=True)
def _get_full_path(self):
return os.path.join(self.path, self.file_name)
full_path = property(_get_full_path)
def __str__(self):
"""Output the file's name."""
return self.file_name
class Meta:
ordering = ["file_name"]
get_latest_by = "date_added"
verbose_name = "file"
verbose_name_plural = "files"
class InboxItem(models.Model):
"""Defines the InboxItem model used in Bloem's files application.
Fields:
file (OneToOneField): Points to the File object.
"""
    file = models.OneToOneField(File, on_delete=models.CASCADE)
def __str__(self):
"""Output the file's name."""
return self.file.file_name
class Meta:
verbose_name = "inbox item"
verbose_name_plural = "inbox items"
class Person(models.Model):
"""Defines the Person model used in Bloem's files application.
    This model is deliberately meant to be as wide as possible, with all
    fields being optional, allowing users to choose which fields they wish
    to fill for each person at their own discretion.
    Fields:
        first_name (CharField): First name of the person.
        last_name (CharField): Last name of the person.
        gender (CharField): Gender of the person; one of GENDER_CHOICES.
        date_of_birth (DateField): Date of birth of the person.
    """
MALE = 'ML'
FEMALE = 'FM'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
)
first_name = models.CharField(blank=True, null=True, max_length=64)
last_name = models.CharField(blank=True, null=True, max_length=64)
    gender = models.CharField(
        max_length=2, choices=GENDER_CHOICES, blank=True, null=True)
date_of_birth = models.DateField(blank=True, null=True)
class Meta:
verbose_name = "person"
verbose_name_plural = "persons"
| gpl-3.0 | 7,179,614,170,286,121,000 | 27.857143 | 79 | 0.633924 | false |
Akagi201/learning-python | pyramid/pyramid_pycharm/pyramid_pycharm/tests.py | 1 | 1506 | import unittest
import transaction
from pyramid import testing
from .models import DBSession
class TestMyViewSuccessCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
from .models import (
Base,
MyModel,
)
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
model = MyModel(name='one', value=55)
DBSession.add(model)
def tearDown(self):
DBSession.remove()
testing.tearDown()
def test_passing_view(self):
from .views import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info['one'].name, 'one')
self.assertEqual(info['project'], 'pyramid_pycharm')
class TestMyViewFailureCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
from .models import (
Base,
MyModel,
)
DBSession.configure(bind=engine)
def tearDown(self):
DBSession.remove()
testing.tearDown()
def test_failing_view(self):
from .views import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info.status_int, 500) | mit | -1,510,835,813,097,960,200 | 26.4 | 60 | 0.609562 | false |