repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
flacjacket/sympy | sympy/combinatorics/tests/test_perm_groups.py | 2 | 21720 |
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.group_constructs import DirectProduct
from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup,\
DihedralGroup, AlternatingGroup, AbelianGroup
from sympy.combinatorics.permutations import Permutation, perm_af_muln, cyclic
from sympy.utilities.pytest import raises, skip, XFAIL
from sympy.combinatorics.generators import rubik_cube_generators
import random
from sympy.combinatorics.testutil import _verify_bsgs, _verify_centralizer,\
_cmp_perm_lists, _verify_normal_closure
from sympy.combinatorics.util import _distribute_gens_by_base
def test_new():
a = Permutation([1, 0])
G = PermutationGroup([a])
assert G.is_abelian
a = Permutation([2, 0, 1])
b = Permutation([2, 1, 0])
G = PermutationGroup([a, b])
assert not G.is_abelian
def test1():
a = Permutation([2, 0, 1, 3, 4, 5])
b = Permutation([0, 2, 1, 3, 4])
g = PermutationGroup([a, b])
raises(ValueError, lambda: test1())
def test_generate():
a = Permutation([1, 0])
g = PermutationGroup([a]).generate()
assert list(g) == [Permutation([0, 1]), Permutation([1, 0])]
g = PermutationGroup([a]).generate(method='dimino')
assert list(g) == [Permutation([0, 1]), Permutation([1, 0])]
a = Permutation([2, 0, 1])
b = Permutation([2, 1, 0])
G = PermutationGroup([a, b])
g = G.generate()
v1 = [p.array_form for p in list(g)]
v1.sort()
assert v1 == [[0,1,2], [0,2,1], [1,0,2], [1,2,0], [2,0,1], [2,1,0]]
v2 = list(G.generate(method='dimino', af=True))
assert v1 == sorted(v2)
a = Permutation([2, 0, 1, 3, 4, 5])
b = Permutation([2, 1, 3, 4, 5, 0])
g = PermutationGroup([a, b]).generate(af=True)
assert len(list(g)) == 360
def test_order():
a = Permutation([2,0,1,3,4,5,6,7,8,9])
b = Permutation([2,1,3,4,5,6,7,8,9,0])
g = PermutationGroup([a, b])
assert g.order() == 1814400
def test_stabilizer():
a = Permutation([2,0,1,3,4,5])
b = Permutation([2,1,3,4,5,0])
G = PermutationGroup([a,b])
G0 = G.stabilizer(0)
assert G0.order() == 60
gens_cube = [[1, 3, 5, 7, 0, 2, 4, 6], [1, 3, 0, 2, 5, 7, 4, 6]]
gens = [Permutation(p) for p in gens_cube]
G = PermutationGroup(gens)
G2 = G.stabilizer(2)
assert G2.order() == 6
G2_1 = G2.stabilizer(1)
v = list(G2_1.generate(af=True))
assert v == [[0, 1, 2, 3, 4, 5, 6, 7], [3, 1, 2, 0, 7, 5, 6, 4]]
gens = ((1,2,0,4,5,3,6,7,8,9,10,11,12,13,14,15,16,17,18,19),
(0,1,2,3,4,5,19,6,8,9,10,11,12,13,14,15,16,7,17,18),
(0,1,2,3,4,5,6,7,9,18,16,11,12,13,14,15,8,17,10,19))
gens = [Permutation(p) for p in gens]
G = PermutationGroup(gens)
G2 = G.stabilizer(2)
assert G2.order() == 181440
def test_center():
# the center of the dihedral group D_n is of order 2 for even n
for i in (4, 6, 10):
D = DihedralGroup(i)
assert (D.center()).order() == 2
# the center of the dihedral group D_n is of order 1 for odd n>2
for i in (3, 5, 7):
D = DihedralGroup(i)
assert (D.center()).order() == 1
# the center of an abelian group is the group itself
for i in (2, 3, 5):
for j in (1, 5, 7):
for k in (1, 1, 11):
G = AbelianGroup(i, j, k)
assert G.center() == G
# the center of a nonabelian simple group is trivial
for i in(1, 5, 9):
A = AlternatingGroup(i)
assert (A.center()).order() == 1
# brute-force verifications
D = DihedralGroup(5)
A = AlternatingGroup(3)
C = CyclicGroup(4)
G = D*A*C
assert _verify_centralizer(G, G)
def test_centralizer():
# the centralizer of the trivial group is the entire group
S = SymmetricGroup(2)
assert S.centralizer(Permutation(range(2))) == S
A = AlternatingGroup(5)
assert A.centralizer(Permutation(range(5))) == A
# a centralizer in the trivial group is the trivial group itself
triv = PermutationGroup([Permutation([0,1,2,3])])
D = DihedralGroup(4)
assert triv.centralizer(D) == triv
# brute-force verifications for centralizers of groups
for i in (4, 5, 6):
S = SymmetricGroup(i)
A = AlternatingGroup(i)
C = CyclicGroup(i)
D = DihedralGroup(i)
for gp in (S, A, C, D):
for gp2 in (S, A, C, D):
if gp2 != gp:
assert _verify_centralizer(gp, gp2)
# verify the centralizer for all elements of several groups
S = SymmetricGroup(5)
elements = list(S.generate_dimino())
for element in elements:
assert _verify_centralizer(S, element)
A = AlternatingGroup(5)
elements = list(A.generate_dimino())
for element in elements:
assert _verify_centralizer(A, element)
D = DihedralGroup(7)
elements = list(D.generate_dimino())
for element in elements:
assert _verify_centralizer(D, element)
# verify centralizers of small groups within small groups
small = []
for i in (1, 2, 3):
small.append(SymmetricGroup(i))
small.append(AlternatingGroup(i))
small.append(DihedralGroup(i))
small.append(CyclicGroup(i))
for gp in small:
for gp2 in small:
if gp.degree == gp2.degree:
assert _verify_centralizer(gp, gp2)
def test_coset_repr():
a = Permutation([0, 2, 1])
b = Permutation([1, 0, 2])
G = PermutationGroup([a, b])
assert G.coset_repr() == [[[0,1,2], [1,0,2], [2,0,1]], [[0,1,2], [0,2,1]]]
assert G.stabilizers_gens() == [[0, 2, 1]]
def test_coset_rank():
gens_cube = [[1, 3, 5, 7, 0, 2, 4, 6], [1, 3, 0, 2, 5, 7, 4, 6]]
gens = [Permutation(p) for p in gens_cube]
G = PermutationGroup(gens)
i = 0
for h in G.generate(af=True):
rk = G.coset_rank(h)
assert rk == i
h1 = G.coset_unrank(rk, af=True)
assert h == h1
i += 1
assert G.coset_unrank(48) == None
assert G.coset_rank(gens[0]) == 6
assert G.coset_unrank(6) == gens[0]
def test_coset_decomposition():
a = Permutation([2,0,1,3,4,5])
b = Permutation([2,1,3,4,5,0])
g = PermutationGroup([a, b])
assert g.order() == 360
rep = g.coset_repr()
d = Permutation([1,0,2,3,4,5])
assert not g.coset_decomposition(d.array_form)
assert not g.has_element(d)
c = Permutation([1,0,2,3,5,4])
v = g.coset_decomposition(c)
assert perm_af_muln(*v) == [1,0,2,3,5,4]
assert g.has_element(c)
a = Permutation([0,2,1])
g = PermutationGroup([a])
c = Permutation([2,1,0])
assert not g.coset_decomposition(c)
assert g.coset_rank(c) == None
def test_orbits():
a = Permutation([2, 0, 1])
b = Permutation([2, 1, 0])
g = PermutationGroup([a, b])
assert g.orbit(0) == set([0, 1, 2])
assert g.orbits() == [set([0, 1, 2])]
assert g.is_transitive
assert g.orbits(rep=True) == [0]
assert g.orbit_transversal(0) == \
[Permutation([0, 1, 2]), Permutation([2, 0, 1]), Permutation([1, 2, 0])]
assert g.orbit_transversal(0, True) == \
[(0, Permutation([0, 1, 2])), (2, Permutation([2, 0, 1])), \
(1, Permutation([1, 2, 0]))]
a = Permutation(range(1, 100) + [0])
G = PermutationGroup([a])
assert G.orbits(rep=True) == [0]
gens = rubik_cube_generators()
g = PermutationGroup(gens, 48)
assert g.orbits(rep=True) == [0, 1]
assert not g.is_transitive
def test_is_normal():
gens_s5 = [Permutation(p) for p in [[1,2,3,4,0], [2,1,4,0,3]]]
G1 = PermutationGroup(gens_s5)
assert G1.order() == 120
gens_a5 = [Permutation(p) for p in [[1,0,3,2,4], [2,1,4,3,0]]]
G2 = PermutationGroup(gens_a5)
assert G2.order() == 60
assert G2.is_normal(G1)
gens3 = [Permutation(p) for p in [[2,1,3,0,4], [1,2,0,3,4]]]
G3 = PermutationGroup(gens3)
assert not G3.is_normal(G1)
assert G3.order() == 12
G4 = G1.normal_closure(G3.generators)
assert G4.order() == 60
gens5 = [Permutation(p) for p in [[1,2,3,0,4], [1,2,0,3,4]]]
G5 = PermutationGroup(gens5)
assert G5.order() == 24
G6 = G1.normal_closure(G5.generators)
assert G6.order() == 120
assert G1 == G6
assert G1 != G4
assert G2 == G4
def test_eq():
a = [[1,2,0,3,4,5], [1,0,2,3,4,5], [2,1,0,3,4,5], [1,2,0,3,4,5]]
a = [Permutation(p) for p in a + [[1,2,3,4,5,0]]]
g = Permutation([1,2,3,4,5,0])
G1, G2, G3 = [PermutationGroup(x) for x in [a[:2],a[2:4],[g, g**2]]]
assert G1.order() == G2.order() == G3.order() == 6
assert G1 == G2
assert G1 != G3
G4 = PermutationGroup([Permutation([0,1])])
assert G1 != G4
assert not G4.is_subgroup(G1)
def test_derived_subgroup():
a = Permutation([1, 0, 2, 4, 3])
b = Permutation([0, 1, 3, 2, 4])
G = PermutationGroup([a,b])
C = G.derived_subgroup()
assert C.order() == 3
assert C.is_normal(G)
assert C.is_subgroup(G)
assert not G.is_subgroup(C)
gens_cube = [[1, 3, 5, 7, 0, 2, 4, 6], [1, 3, 0, 2, 5, 7, 4, 6]]
gens = [Permutation(p) for p in gens_cube]
G = PermutationGroup(gens)
C = G.derived_subgroup()
assert C.order() == 12
def test_is_solvable():
a = Permutation([1,2,0])
b = Permutation([1,0,2])
G = PermutationGroup([a, b])
assert G.is_solvable
a = Permutation([1,2,3,4,0])
b = Permutation([1,0,2,3,4])
G = PermutationGroup([a, b])
assert not G.is_solvable
def test_rubik1():
gens = rubik_cube_generators()
gens1 = [gens[0]] + [p**2 for p in gens[1:]]
G1 = PermutationGroup(gens1)
assert G1.order() == 19508428800
gens2 = [p**2 for p in gens]
G2 = PermutationGroup(gens2)
assert G2.order() == 663552
assert G2.is_subgroup(G1)
C1 = G1.derived_subgroup()
assert C1.order() == 4877107200
assert C1.is_subgroup(G1)
assert not G2.is_subgroup(C1)
@XFAIL
def test_rubik():
skip('takes too much time')
gens = rubik_cube_generators()
G = PermutationGroup(gens)
assert G.order() == 43252003274489856000
G1 = PermutationGroup(gens[:3])
assert G1.order() == 170659735142400
assert not G1.is_normal(G)
G2 = G.normal_closure(G1.generators)
assert G2 == G
def test_direct_product():
C = CyclicGroup(4)
D = DihedralGroup(4)
G = C*C*C
assert G.order() == 64
assert G.degree == 12
assert len(G.orbits()) == 3
assert G.is_abelian == True
H = D*C
assert H.order() == 32
assert H.is_abelian == False
def test_orbit_rep():
G = DihedralGroup(6)
assert G.orbit_rep(1,3) in [Permutation([2, 3, 4, 5, 0, 1]),\
Permutation([4, 3, 2, 1, 0, 5])]
H = CyclicGroup(4)*G
assert H.orbit_rep(1, 5) == False
def test_schreier_vector():
G = CyclicGroup(50)
v = [0]*50
v[23] = -1
assert G.schreier_vector(23) == v
H = DihedralGroup(8)
assert H.schreier_vector(2) == [0, 1, -1, 0, 0, 1, 0, 0]
L = SymmetricGroup(4)
assert L.schreier_vector(1) == [1, -1, 0, 0]
def test_random_pr():
D = DihedralGroup(6)
r = 11
n = 3
_random_prec_n = {}
_random_prec_n[0] = {'s': 7, 't': 3, 'x': 2, 'e': -1}
_random_prec_n[1] = {'s': 5, 't': 5, 'x': 1, 'e': -1}
_random_prec_n[2] = {'s': 3, 't': 4, 'x': 2, 'e': 1}
D._random_pr_init(r, n, _random_prec_n = _random_prec_n)
assert D._random_gens[11] == Permutation([0, 1, 2, 3, 4, 5])
_random_prec = {'s': 2, 't': 9, 'x': 1, 'e': -1}
assert D.random_pr(_random_prec = _random_prec) == \
Permutation([0, 5, 4, 3, 2, 1])
def test_is_alt_sym():
G = DihedralGroup(10)
assert G.is_alt_sym() == False
S = SymmetricGroup(10)
N_eps = 10
_random_prec = {'N_eps': N_eps,
0: Permutation([[2], [1, 4], [0, 6, 7, 8, 9, 3, 5]]),
1: Permutation([[1, 8, 7, 6, 3, 5, 2, 9], [0, 4]]),
2: Permutation([[5, 8], [4, 7], [0, 1, 2, 3, 6, 9]]),
3: Permutation([[3], [0, 8, 2, 7, 4, 1, 6, 9, 5]]),
4: Permutation([[8], [4, 7, 9], [3, 6], [0, 5, 1, 2]]),
5: Permutation([[6], [0, 2, 4, 5, 1, 8, 3, 9, 7]]),
6: Permutation([[6, 9, 8], [4, 5], [1, 3, 7], [0, 2]]),
7: Permutation([[4], [0, 2, 9, 1, 3, 8, 6, 5, 7]]),
8: Permutation([[1, 5, 6, 3], [0, 2, 7, 8, 4, 9]]),
9: Permutation([[8], [6, 7], [2, 3, 4, 5], [0, 1, 9]])}
assert S.is_alt_sym(_random_prec = _random_prec) == True
A = AlternatingGroup(10)
_random_prec = {'N_eps': N_eps,
0: Permutation([[1, 6, 4, 2, 7, 8, 5, 9, 3], [0]]),
1: Permutation([[1], [0, 5, 8, 4, 9, 2, 3, 6, 7]]),
2: Permutation([[1, 9, 8, 3, 2, 5], [0, 6, 7, 4]]),
3: Permutation([[6, 8, 9], [4, 5], [1, 3, 7, 2], [0]]),
4: Permutation([[8], [5], [4], [2, 6, 9, 3], [1], [0, 7]]),
5: Permutation([[3, 6], [0, 8, 1, 7, 5, 9, 4, 2]]),
6: Permutation([[5], [2, 9], [1, 8, 3], [0, 4, 7, 6]]),
7: Permutation([[1, 8, 4, 7, 2, 3], [0, 6, 9, 5]]),
8: Permutation([[5, 8, 7], [3], [1, 4, 2, 6], [0, 9]]),
9: Permutation([[4, 9, 6], [3, 8], [1, 2], [0, 5, 7]])}
assert A.is_alt_sym(_random_prec = _random_prec) == False
def test_minimal_block():
D = DihedralGroup(6)
block_system = D.minimal_block([0,3])
for i in range(3):
assert block_system[i] == block_system[i+3]
S = SymmetricGroup(6)
assert S.minimal_block([0, 1]) == [0, 0, 0, 0, 0, 0]
def test_max_div():
S = SymmetricGroup(10)
assert S.max_div == 5
def test_is_primitive():
S = SymmetricGroup(5)
assert S.is_primitive() == True
C = CyclicGroup(7)
assert C.is_primitive() == True
def test_random_stab():
S = SymmetricGroup(5)
_random_el = Permutation([1, 3, 2, 0, 4])
_random_prec = {'rand': _random_el}
g = S.random_stab(2, _random_prec = _random_prec)
assert g == Permutation([1, 3, 2, 0, 4])
h = S.random_stab(1)
assert h(1) == 1
def test_transitivity_degree():
perm = Permutation([1, 2, 0])
C = PermutationGroup([perm])
assert C.transitivity_degree == 1
gen1 = Permutation([1, 2, 0, 3, 4])
gen2 = Permutation([1, 2, 3, 4, 0])
# alternating group of degree 5
Alt = PermutationGroup([gen1, gen2])
assert Alt.transitivity_degree == 3
def test_schreier_sims_random():
S = SymmetricGroup(3)
base = [0, 1]
strong_gens = [Permutation([1, 2, 0]), Permutation([1, 0, 2]),\
Permutation([0, 2, 1])]
assert S.schreier_sims_random(base, strong_gens, 5) == (base, strong_gens)
D = DihedralGroup(3)
_random_prec = {'g': [Permutation([2, 0, 1]), Permutation([1, 2, 0]),\
Permutation([1, 0, 2])]}
base = [0, 1]
strong_gens = [Permutation([1, 2, 0]), Permutation([2, 1, 0]),\
Permutation([0, 2, 1])]
assert D.schreier_sims_random([], D.generators, 2,\
_random_prec=_random_prec) == (base, strong_gens)
def test_baseswap():
S = SymmetricGroup(4)
S.schreier_sims()
base = S.base
strong_gens = S.strong_gens
assert base == [0, 1, 2]
deterministic = S.baseswap(base, strong_gens, 1, randomized=False)
randomized = S.baseswap(base, strong_gens, 1)
assert deterministic[0] == [0, 2, 1]
assert _verify_bsgs(S, deterministic[0], deterministic[1]) == True
assert randomized[0] == [0, 2, 1]
assert _verify_bsgs(S, randomized[0], randomized[1]) == True
def test_schreier_sims_incremental():
identity = Permutation([0, 1, 2, 3, 4])
TrivialGroup = PermutationGroup([identity])
base, strong_gens = TrivialGroup.schreier_sims_incremental(base=[0, 1, 2])
assert _verify_bsgs(TrivialGroup, base, strong_gens) == True
S = SymmetricGroup(5)
base, strong_gens = S.schreier_sims_incremental(base=[0,1,2])
assert _verify_bsgs(S, base, strong_gens) == True
D = DihedralGroup(2)
base, strong_gens = D.schreier_sims_incremental(base=[1])
assert _verify_bsgs(D, base, strong_gens) == True
A = AlternatingGroup(7)
gens = A.generators[:]
gen0 = gens[0]
gen1 = gens[1]
gen1 = gen1*(~gen0)
gen0 = gen0*gen1
gen1 = gen0*gen1
base, strong_gens = A.schreier_sims_incremental(base=[0,1], gens=gens)
assert _verify_bsgs(A, base, strong_gens) == True
C = CyclicGroup(11)
gen = C.generators[0]
base, strong_gens = C.schreier_sims_incremental(gens=[gen**3])
assert _verify_bsgs(C, base, strong_gens) == True
def test_subgroup_search():
prop_true = lambda x: True
prop_fix_points = lambda x: [x(point) for point in points] == points
prop_comm_g = lambda x: x*g == g*x
prop_even = lambda x: x.is_even
for i in range(10, 17, 2):
S = SymmetricGroup(i)
A = AlternatingGroup(i)
C = CyclicGroup(i)
Sym = S.subgroup_search(prop_true)
assert Sym == S
Alt = S.subgroup_search(prop_even)
assert Alt == A
Sym = S.subgroup_search(prop_true, init_subgroup=C)
assert Sym == S
points = [7]
assert S.stabilizer(7) == S.subgroup_search(prop_fix_points)
points = [3, 4]
assert S.stabilizer(3).stabilizer(4) ==\
S.subgroup_search(prop_fix_points)
points = [3, 5]
fix35 = A.subgroup_search(prop_fix_points)
points = [5]
fix5 = A.subgroup_search(prop_fix_points)
assert A.subgroup_search(prop_fix_points, init_subgroup=fix35) == fix5
base, strong_gens = A.schreier_sims_incremental()
g = A.generators[0]
comm_g =\
A.subgroup_search(prop_comm_g, base=base, strong_gens=strong_gens)
assert _verify_bsgs(comm_g, base, comm_g.generators) == True
assert [prop_comm_g(gen) == True for gen in comm_g.generators]
def test_normal_closure():
# the normal closure of the trivial group is trivial
S = SymmetricGroup(3)
identity = Permutation([0, 1, 2])
closure = S.normal_closure(identity)
assert closure.is_trivial
# the normal closure of the entire group is the entire group
A = AlternatingGroup(4)
assert A.normal_closure(A) == A
# brute-force verifications for subgroups
for i in (3, 4, 5):
S = SymmetricGroup(i)
A = AlternatingGroup(i)
D = DihedralGroup(i)
C = CyclicGroup(i)
for gp in (A, D, C):
assert _verify_normal_closure(S, gp)
# brute-force verifications for all elements of a group
S = SymmetricGroup(5)
elements = list(S.generate_dimino())
for element in elements:
assert _verify_normal_closure(S, element)
# small groups
small = []
for i in (1, 2, 3):
small.append(SymmetricGroup(i))
small.append(AlternatingGroup(i))
small.append(DihedralGroup(i))
small.append(CyclicGroup(i))
for gp in small:
for gp2 in small:
if gp2.is_subgroup(gp):
assert _verify_normal_closure(gp, gp2)
def test_derived_series():
# the derived series of the trivial group consists only of the trivial group
triv = PermutationGroup([Permutation([0, 1, 2])])
assert triv.derived_series() == [triv]
# the derived series for a simple group consists only of the group itself
for i in (5, 6, 7):
A = AlternatingGroup(i)
assert A.derived_series() == [A]
# the derived series for S_4 is S_4 > A_4 > K_4 > triv
S = SymmetricGroup(4)
series = S.derived_series()
assert series[1] == AlternatingGroup(4)
assert series[2] == DihedralGroup(2)
assert series[3].is_trivial
def test_lower_central_series():
# the lower central series of the trivial group consists of the trivial
# group
triv = PermutationGroup([Permutation([0, 1, 2])])
assert triv.lower_central_series() == [triv]
# the lower central series of a simple group consists of the group itself
for i in (5, 6, 7):
A = AlternatingGroup(i)
assert A.lower_central_series() == [A]
# GAP-verified example
S = SymmetricGroup(6)
series = S.lower_central_series()
assert len(series) == 2
assert series[1] == AlternatingGroup(6)
def test_commutator():
# the commutator of the trivial group and the trivial group is trivial
S = SymmetricGroup(3)
triv = PermutationGroup([Permutation([0, 1, 2])])
assert S.commutator(triv, triv) == triv
# the commutator of the trivial group and any other group is again trivial
A = AlternatingGroup(3)
assert S.commutator(triv, A) == triv
# the commutator is commutative
for i in (3, 4, 5):
S = SymmetricGroup(i)
A = AlternatingGroup(i)
D = DihedralGroup(i)
assert S.commutator(A, D) == S.commutator(D, A)
# the commutator of an abelian group is trivial
S = SymmetricGroup(7)
A1 = AbelianGroup(2, 5)
A2 = AbelianGroup(3, 4)
triv = PermutationGroup([Permutation([0, 1, 2, 3, 4, 5, 6])])
assert S.commutator(A1, A1) == triv
assert S.commutator(A2, A2) == triv
# examples calculated by hand
S = SymmetricGroup(3)
A = AlternatingGroup(3)
assert S.commutator(A, S) == A
def test_is_nilpotent():
# every abelian group is nilpotent
for i in (1, 2, 3):
C = CyclicGroup(i)
Ab = AbelianGroup(i, i + 2)
assert C.is_nilpotent
assert Ab.is_nilpotent
Ab = AbelianGroup(5, 7, 10)
assert Ab.is_nilpotent
# A_5 is not solvable and thus not nilpotent
assert AlternatingGroup(5).is_nilpotent == False
def test_is_trivial():
for i in range(5):
triv = PermutationGroup([Permutation(range(i))])
assert triv.is_trivial
def test_pointwise_stabilizer():
S = SymmetricGroup(5)
points = []
stab = S
for point in (2, 0, 3, 4, 1):
stab = stab.stabilizer(point)
points.append(point)
assert S.pointwise_stabilizer(points) == stab
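# Illustrative sketch, not one of the original tests: the stabilizer orders
# asserted above are instances of the orbit-stabilizer theorem,
# |G| == |orbit(x)| * |Stab_G(x)|. The helper name below is hypothetical and
# underscored so test runners skip it.
def _orbit_stabilizer_sketch():
    G = SymmetricGroup(4)
    assert G.order() == len(G.orbit(0))*G.stabilizer(0).order()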
| bsd-3-clause | 6,483,593,423,376,247,000 | 34.665025 | 80 | 0.581031 | false |
twschiller/open-synthesis | openach/migrations/0019_boardfollower.py | 1 | 1515 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-23 01:38
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("openach", "0018_auto_20160919_2318"),
]
operations = [
migrations.CreateModel(
name="BoardFollower",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("is_creator", models.BooleanField(default=False)),
("is_contributor", models.BooleanField(default=False)),
("is_evaluator", models.BooleanField(default=False)),
("update_timestamp", models.DateTimeField()),
(
"board",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="followers",
to="openach.Board",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="openach.Board"
),
),
],
),
]
| gpl-3.0 | 4,212,794,619,625,237,500 | 30.5625 | 87 | 0.433663 | false |
dwang159/iris-api | src/iris/metrics/influx.py | 2 | 1761 |
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
# This is named 'influx' to avoid conflicting with the influxdb module
from datetime import datetime
import logging
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError, InfluxDBServerError
from requests.exceptions import RequestException
logger = logging.getLogger()
# pip install influxdb==3.0.0
class influx(object):
def __init__(self, config, appname):
try:
self.client = InfluxDBClient(**config['influxdb']['connect'])
self.enable_metrics = True
except KeyError:
logger.warning('Missing connect arguments for influxdb. Running with no metrics.')
self.enable_metrics = False
return
try:
self.extra_tags = config['influxdb']['tags']
except KeyError:
self.extra_tags = {}
self.appname = appname
def send_metrics(self, metrics):
if not self.enable_metrics:
return
now = str(datetime.now())
payload = []
for metric, value in metrics.iteritems():
data = {
'measurement': self.appname,
'tags': {},
'time': now,
'fields': {
metric: value
}
}
if self.extra_tags:
data['tags'].update(self.extra_tags)
payload.append(data)
try:
self.client.write_points(payload)
except (RequestException, InfluxDBClientError, InfluxDBServerError):
logger.exception('Failed to send metrics to influxdb')
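# Illustrative usage sketch; the config shape mirrors the keys read above and
# the host/database/app-name values are placeholders, not iris defaults:
#   cfg = {'influxdb': {'connect': {'host': 'localhost', 'port': 8086,
#                                   'database': 'iris'},
#                       'tags': {'region': 'us-east-1'}}}
#   stats = influx(cfg, 'iris-sender')
#   stats.send_metrics({'message_send_cnt': 1, 'task_failure_cnt': 0})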
| bsd-2-clause | 4,053,119,667,844,424,000 | 32.865385 | 99 | 0.602499 | false |
tylerbrockett/Alert-Bot-Reddit | src/bot_modules/inbox_handler.py | 1 | 12640 |
"""
==========================================
Author: Tyler Brockett
Username: /u/tylerbrockett
Description: Alert Bot
==========================================
"""
from utils import inbox, times
from bot_modules.sleep_handler import SleepHandler
from bot_modules.database_handler import DatabaseHandlerException
from utils.logger import Logger
from utils.color import Color
from utils.env import env, DEV_USERNAME
from utils.subscription import Subscription
from parsing.message_parser import MessageParser
from parsing.message_lexer import MessageLexer
import json
import traceback
class InboxHandler:
@staticmethod
def reply(message, response):
Logger.log('Replying to message: ' + message.id)
message.reply(response[0: min(len(response), 10000)])
@staticmethod
def handle_message_from_reddit(reddit, message):
Logger.log('Message from reddit')
reddit.send_message(env(DEV_USERNAME), 'FWD: ' + message.subject, message.body)
message.mark_read()
@staticmethod
def handle_statistics_message(database, message):
Logger.log('Stats message')
formatted_message = inbox.compose_statistics(
str(message.author),
database.count_current_users(),
database.count_all_users(),
database.count_unique_subscriptions(),
database.count_all_subscriptions(),
len(database.get_unique_subreddits()),
database.count_total_matches(),
database.get_unique_subreddits())
InboxHandler.reply(message, formatted_message)
message.mark_read()
@staticmethod
def handle_get_subscriptions_message(database, message):
Logger.log('Get subs message')
subscriptions = database.get_subscriptions_by_user(str(message.author))
formatted_message = inbox.compose_all_subscriptions_message(str(message.author), subscriptions)
InboxHandler.reply(message, formatted_message)
message.mark_read()
@staticmethod
def handle_subscription_message(database, reddit, message, payload):
Logger.log('Sub message')
new_sub = Subscription(payload, str(message.author), message.id)
existing_subs = database.get_subscriptions_by_user(str(message.author))
duplicate_subs = new_sub.check_against_existing(existing_subs)
if duplicate_subs:
Logger.log('Subscription already exists', Color.RED)
InboxHandler.reply(message, inbox.compose_duplicate_subscription_message(
str(message.author),
duplicate_subs[0],
new_sub))
message.mark_read()
return
invalid_subreddits = reddit.check_invalid_subreddits(new_sub.data[Subscription.SUBREDDITS])
if invalid_subreddits:
Logger.log('Subreddit(s) invalid: ' + str(invalid_subreddits), Color.RED)
InboxHandler.reply(message, inbox.compose_invalid_subreddit_message(str(message.author), invalid_subreddits, message))
message.mark_read()
return
database.insert_subscription(str(message.author), new_sub.message_id, new_sub.to_string(),
times.get_current_timestamp())
existing_subs.append(new_sub)
# TODO Remove subreddit not specified stuff, taken care of in SubscriptionParser.py
subreddit_not_specified = len(new_sub.data[Subscription.SUBREDDITS]) == 0
InboxHandler.reply(message,
inbox.compose_subscribe_message(str(message.author), new_sub, existing_subs, subreddit_not_specified))
database.commit()
message.mark_read()
@staticmethod
def handle_unsubscribe_message(reddit, database, message):
Logger.log('Unsub message')
parent_m_id = reddit.get_original_message_id(message, database)
removed_subs = database.remove_subscriptions_by_message_id(str(message.author), parent_m_id)
subs = database.get_subscriptions_by_user(str(message.author))
if len(removed_subs) > 0:
InboxHandler.reply(message, inbox.compose_unsubscribe_message(str(message.author), removed_subs, subs))
else:
InboxHandler.reply(message, inbox.compose_unsubscribe_invalid_sub_message(str(message.author), subs))
message.mark_read()
@staticmethod
def handle_unsubscribe_from_num_message(database, message, payload):
Logger.log('Unsub from num')
removed = database.remove_subscription_by_number(str(message.author), int(payload))
subs = database.get_subscriptions_by_user(str(message.author))
if removed:
InboxHandler.reply(message, inbox.compose_unsubscribe_from_num_message(str(message.author), removed, subs))
else:
InboxHandler.reply(message, inbox.compose_unsubscribe_invalid_sub_message(str(message.author), subs))
message.mark_read()
@staticmethod
def handle_edit_message(database, message, payload):
Logger.log('Edit message')
InboxHandler.reply(message, inbox.compose_edit_message(str(message.author)))
message.mark_read()
# TODO Handle if there are 0 subs for user
@staticmethod
def handle_unsubscribe_all_message(database, message):
Logger.log('Unsub all message')
removed_subscriptions = database.remove_all_subscriptions(str(message.author))
InboxHandler.reply(message, inbox.compose_unsubscribe_all_message(str(message.author)))
message.mark_read()
@staticmethod
def handle_help_message(database, message):
Logger.log('Help message')
subs = database.get_subscriptions_by_user(str(message.author))
InboxHandler.reply(message, inbox.compose_help_message(str(message.author), subs))
message.mark_read()
@staticmethod
def handle_feedback_message(reddit, message):
Logger.log('Feedback message')
reddit.send_message(
env(DEV_USERNAME),
'FEEDBACK',
inbox.compose_feedback_forward(env(DEV_USERNAME), str(message.author), message.body)
)
InboxHandler.reply(message, inbox.compose_feedback_message(str(message.author)))
message.mark_read()
@staticmethod
def handle_username_mention_message(reddit, message):
Logger.log('Username mention message')
try:
InboxHandler.reply(message, inbox.compose_username_mention_reply(str(message.author)))
message.mark_read()
reddit.send_message(
env(DEV_USERNAME),
'USERNAME MENTION',
inbox.compose_username_mention_forward(env(DEV_USERNAME), str(message.author), message.body)
)
except Exception as e: # Figure out more specific exception thrown (praw.exceptions.APIException?)
Logger.log(str(e), Color.RED)
Logger.log('Handled RateLimitExceeded praw error - Commenting too frequently', Color.RED)
@staticmethod
def handle_post_reply_message(reddit, message):
Logger.log('Post reply message')
reddit.send_message(
env(DEV_USERNAME),
'USERNAME MENTION',
inbox.compose_username_mention_forward(env(DEV_USERNAME), str(message.author), message.body)
)
message.mark_read()
@staticmethod
def handle_reject_message(reddit, message, error):
Logger.log('Handle reject message')
reject_message = inbox.compose_reject_message(str(message.author), message.subject, message.body, error)
InboxHandler.reply(message, reject_message)
message.mark_read()
reddit.send_message(
env(DEV_USERNAME),
'REJECT MESSAGE - ' + str(message.author),
reject_message
)
# TODO Add the ability to EDIT existing subscriptions
@staticmethod
def read_inbox(database, reddit):
Logger.log('Reading inbox...', Color.GREEN)
unread = []
try:
unread = reddit.get_unread()
except:
unread = []
num_messages = 0
for message in unread:
num_messages += 1
username = str(message.author).lower()
subject = inbox.format_subject(str(message.subject).lower())
body = str(message.body).lower()
try:
if username == 'reddit':
InboxHandler.handle_message_from_reddit(reddit, message)
elif subject == 'username mention':
InboxHandler.handle_username_mention_message(reddit, message)
elif subject == 'post reply':
InboxHandler.handle_post_reply_message(reddit, message)
elif subject in MessageLexer.feedback_keywords:
InboxHandler.handle_feedback_message(reddit, message)
elif subject in MessageLexer.help_keywords:
InboxHandler.handle_help_message(database, message)
else:
m = MessageParser(message)
action = m.data[MessageParser.KEY_ACTION]
error = m.data[MessageParser.KEY_ERROR]
payload = m.get_payload()
Logger.log(json.dumps(m.data, indent=2), Color.MAGENTA)
if error:
Logger.log(
'REJECT:\n' +
'Error:\t' + str(error) + '\n' +
'Subject:\t' + str(message.subject) + '\n' +
'Body:\t\t' + str(message.body)
)
InboxHandler.handle_reject_message(reddit, message, error)
elif action == MessageParser.ACTION_STATISTICS:
InboxHandler.handle_statistics_message(database, message)
elif action == MessageParser.ACTION_GET_SUBSCRIPTIONS:
InboxHandler.handle_get_subscriptions_message(database, message)
elif action == MessageParser.ACTION_UNSUBSCRIBE_ALL:
InboxHandler.handle_unsubscribe_all_message(database, message)
elif action == MessageParser.ACTION_UNSUBSCRIBE:
InboxHandler.handle_unsubscribe_message(reddit, database, message)
elif action == MessageParser.ACTION_UNSUBSCRIBE_FROM_NUM:
InboxHandler.handle_unsubscribe_from_num_message(database, message, payload)
elif action == MessageParser.ACTION_SUBSCRIBE:
InboxHandler.handle_subscription_message(database, reddit, message, payload)
elif action == MessageParser.ACTION_EDIT:
InboxHandler.handle_edit_message(database, message, payload)
elif action == MessageParser.ACTION_HELP:
InboxHandler.handle_help_message(database, message)
elif action == MessageParser.ACTION_FEEDBACK:
InboxHandler.handle_feedback_message(reddit, message)
except DatabaseHandlerException as ex:
Logger.log(traceback.format_exc(), Color.RED)
if ex.errorArgs == DatabaseHandlerException.INTEGRITY_ERROR:
message.mark_read()
reddit.send_message(env(DEV_USERNAME),
'Integrity Error',
'SUBJECT: ' + str(inbox.format_subject(message.subject)) + '\n\n' +
'BODY:\n' + str(message.body))
except:
Logger.log(traceback.format_exc(), Color.RED)
reddit.send_message(env(DEV_USERNAME),
'ERROR HANDLING MESSAGE - POTENTIALLY STUCK IN INBOX',
'Should NOT be seeing this message anymore hopefully...\t \n' +
'AUTHOR: /u/' + str(message.author) + '\t \n' +
'SUBJECT: ' + str(message.subject) + '\t \n' +
'BODY:\n' + str(message.body))
SleepHandler.sleep(2)
Logger.log(str(num_messages) + ' unread messages handled', Color.CYAN)
class InboxHandlerException(Exception):
READ_MESSAGES_EXCEPTION = 'Error reading messages'
def __init__(self, error_args, traceback=None):
Exception.__init__(self, 'InboxHandlerException: {0}'.format(error_args))
self.errorArgs = error_args
print(traceback)
| mit | -5,505,985,071,328,949,000 | 46.164179 | 130 | 0.607991 | false |
ahvigil/MultiQC | multiqc/modules/qualimap/QM_RNASeq.py | 1 | 6096 |
#!/usr/bin/env python
""" MultiQC Submodule to parse output from Qualimap RNASeq """
from __future__ import print_function
from collections import OrderedDict
import logging
import re
from multiqc import config
from multiqc.plots import bargraph, linegraph
# Initialise the logger
log = logging.getLogger(__name__)
def parse_reports(self):
""" Find Qualimap RNASeq reports and parse their data """
sp = config.sp['qualimap']['rnaseq']
self.qualimap_rnaseq_genome_results = dict()
regexes = {
'reads_aligned': r"read(?:s| pairs) aligned\s*=\s*([\d,]+)",
'total_alignments': r"total alignments\s*=\s*([\d,]+)",
'non_unique_alignments': r"non-unique alignments\s*=\s*([\d,]+)",
'reads_aligned_genes': r"aligned to genes\s*=\s*([\d,]+)",
'ambiguous_alignments': r"ambiguous alignments\s*=\s*([\d,]+)",
'not_aligned': r"not aligned\s*=\s*([\d,]+)",
'5_3_bias': r"5'-3' bias\s*=\s*(\d+\.\d+)",
'reads_aligned_exonic': r"exonic\s*=\s*([\d,]+)",
'reads_aligned_intronic': r"intronic\s*=\s*([\d,]+)",
'reads_aligned_intergenic': r"intergenic\s*=\s*([\d,]+)",
'reads_aligned_overlapping_exon': r"overlapping exon\s*=\s*([\d,]+)",
}
for f in self.find_log_files(sp['rnaseq_results']):
d = dict()
# Get the sample name
s_name_regex = re.search(r"bam file\s*=\s*(.+)", f['f'], re.MULTILINE)
if s_name_regex:
d['bam_file'] = s_name_regex.group(1)
s_name = self.clean_s_name(d['bam_file'], f['root'])
else:
log.warn("Couldn't find an input filename in genome_results file {}/{}".format(f['root'], f['fn']))
return None
# Check for and 'fix' European style decimal places / thousand separators
comma_regex = re.search(r"exonic\s*=\s*[\d\.]+ \(\d{1,3},\d+%\)", f['f'], re.MULTILINE)
if comma_regex:
log.debug("Trying to fix European comma style syntax in Qualimap report {}/{}".format(f['root'], f['fn']))
f['f'] = f['f'].replace('.','')
f['f'] = f['f'].replace(',','.')
# Go through all numeric regexes
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
try:
d[k] = float(r_search.group(1).replace(',',''))
except UnicodeEncodeError:
# Qualimap reports infinity (\u221e) when 3' bias denominator is zero
pass
except ValueError:
d[k] = r_search.group(1)
# Add to general stats table
for k in ['5_3_bias', 'reads_aligned']:
try:
self.general_stats_data[s_name][k] = d[k]
except KeyError:
pass
# Save results
if s_name in self.qualimap_rnaseq_genome_results:
log.debug("Duplicate genome results sample name found! Overwriting: {}".format(s_name))
self.qualimap_rnaseq_genome_results[s_name] = d
self.add_data_source(f, s_name=s_name, section='rna_genome_results')
#### Coverage profile
self.qualimap_rnaseq_cov_hist = dict()
for f in self.find_log_files(sp['coverage'], filehandles=True):
s_name = self.get_s_name(f)
d = dict()
for l in f['f']:
if l.startswith('#'):
continue
coverage, count = l.split(None, 1)
coverage = int(round(float(coverage)))
count = float(count)
d[coverage] = count
if len(d) == 0:
log.debug("Couldn't parse contents of coverage histogram file {}".format(f['fn']))
return None
# Save results
if s_name in self.qualimap_rnaseq_cov_hist:
log.debug("Duplicate coverage histogram sample name found! Overwriting: {}".format(s_name))
self.qualimap_rnaseq_cov_hist[s_name] = d
self.add_data_source(f, s_name=s_name, section='rna_coverage_histogram')
#### Plots
# Genomic Origin Bar Graph
# NB: Ignore 'Overlapping Exon' in report - these make the numbers add up to > 100%
if len(self.qualimap_rnaseq_genome_results) > 0:
gorigin_cats = OrderedDict()
gorigin_cats['reads_aligned_exonic'] = {'name': 'Exonic'}
gorigin_cats['reads_aligned_intronic'] = {'name': 'Intronic'}
gorigin_cats['reads_aligned_intergenic'] = {'name': 'Intergenic'}
gorigin_pconfig = {
'id': 'qualimap_genomic_origin',
'title': 'Genomic Origin',
'cpswitch_c_active': False
}
self.sections.append({
'name': 'Genomic origin of reads',
'anchor': 'qualimap-reads-genomic-origin',
'content': bargraph.plot(self.qualimap_rnaseq_genome_results, gorigin_cats, gorigin_pconfig)
})
if len(self.qualimap_rnaseq_cov_hist) > 0:
self.sections.append({
'name': 'Gene Coverage Profile',
'anchor': 'qualimap-genome-fraction-coverage',
'content': linegraph.plot(self.qualimap_rnaseq_cov_hist, {
'id': 'qualimap_gene_coverage_profile',
'title': 'Coverage Profile Along Genes (total)',
'ylab': 'Coverage',
'xlab': 'Transcript Position (%)',
'ymin': 0,
'xmin': 0,
'xmax': 100,
'tt_label': '<b>{point.x} bp</b>: {point.y:.0f}%',
})
})
#### General Stats
self.general_stats_headers['5_3_bias'] = {
'title': "5'-3' bias",
'format': '{:.2f}',
}
self.general_stats_headers['reads_aligned'] = {
'title': '{} Aligned'.format(config.read_count_prefix),
'description': 'Reads Aligned ({})'.format(config.read_count_desc),
'min': 0,
'scale': 'RdBu',
'shared_key': 'read_count',
'modify': lambda x: x * config.read_count_multiplier
}
# Return the number of reports we found
return len(self.qualimap_rnaseq_genome_results.keys())
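# Illustrative excerpt of the results file parsed above; the field names come
# from the regexes, the numbers are placeholders:
#     bam file = sample_1.bam
#     read pairs aligned = 1,234,567
#     total alignments = 2,600,000
#     5'-3' bias = 0.98
#     exonic = 2,000,000 (76.92%)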
| gpl-3.0 | 6,180,485,285,409,714,000 | 38.584416 | 118 | 0.549541 | false |
Tsur/sos | server/desktop/models/users.py | 1 | 4179 |
# STD Libraries
# Django Core Libraries
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.core.mail import send_mail
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.db import models
# Third-party Libraries
from phonenumber_field.modelfields import PhoneNumberField
# Django Local Applications Libraries
__all__ = ['UserModel']
class UserManager(BaseUserManager):
def _create_user(self, email, password,
is_staff, is_superuser, **extra_fields):
"""
Creates and saves an EmailUser with the given email and password.
"""
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
is_active = extra_fields.pop("is_active", True)
user = self.model(email=email, is_staff=is_staff, is_active=is_active,
is_superuser=is_superuser, last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
is_staff = extra_fields.pop("is_staff", False)
return self._create_user(email, password, is_staff, False,
**extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, True,
**extra_fields)
class UserModel(AbstractBaseUser, PermissionsMixin):
"""
Abstract User with the same behaviour as Django's default User but
without a username field. Uses email as the USERNAME_FIELD for
authentication.
Use this if you need to extend EmailUser.
Inherits from both the AbstractBaseUser and PermissionMixin.
The following attributes are inherited from the superclasses:
* password
* last_login
* is_superuser
"""
# name: required and not unique
name = models.CharField(_('name'), max_length=128)
# username: not required and unique
username = models.CharField(_('username'), max_length=32, unique=True, null=True, blank=True)
# phone: not required and unique
phone = PhoneNumberField(_('phone number'), unique=True, null=True, blank=True)
# email: required and unique
email = models.EmailField(_('email address'), max_length=255,
unique=True, db_index=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def save(self, *args, **kwargs):
if not self.username is None and not self.username.strip():
self.username = None
if not self.phone is None and not str(self.phone).strip():
self.phone = None
super(UserModel, self).save(*args, **kwargs)
def get_full_name(self):
"""
Returns the name.
"""
return self.name
def get_short_name(self):
"""
Returns the name.
"""
return self.name.split(' ')[0]
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
def notify(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
app_label = 'desktop'
swappable = 'AUTH_USER_MODEL' | mit | -2,789,567,202,738,048,500 | 31.403101 | 97 | 0.619048 | false |
googleinterns/stampify | extraction/content_extractors/text_extractor.py | 1 | 1324 |
"""This script checks whether DOM has text tag or not and
creates and returns the Text object"""
import bs4
from data_models.text import Text
from extraction.content_extractors.interface_content_extractor import \
IContentExtractor
from extraction.utils import string_utils as utils
TEXT_TAGS = ['p', 'span', 'code', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']
MAX_CHILD = 3
class TextExtractor(IContentExtractor):
"""This class inherits IContentExtractor for extracting text"""
def validate_and_extract(self, node: bs4.element):
"""Validates if a tag is text tag and
returns the extracted data from the text tag in Text object"""
if isinstance(node, bs4.element.Tag):
text_data = node.get_text().strip()
if (node.name in TEXT_TAGS) \
and not utils.empty_text(text_data):
text_type = node.name
is_bold = (node.find('strong') or node.find('b')) \
and len(node.contents) <= MAX_CHILD
text_content = Text(text_data, text_type, is_bold)
return text_content
elif isinstance(node, bs4.element.NavigableString) \
and not utils.empty_text(node):
text_content = Text(node.strip())
return text_content
return None
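# Illustrative usage sketch with hypothetical markup (not from the stampify
# test suite):
#   node = bs4.BeautifulSoup('<p>Some <b>bold</b> text</p>', 'html.parser').p
#   text = TextExtractor().validate_and_extract(node)
#   # -> a Text object carrying 'Some bold text', tag type 'p', bold detected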
| apache-2.0 | 3,093,648,934,173,986,000 | 35.777778 | 71 | 0.617825 | false |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_upgrade/changes_2007_04_11/test_do_convert_numarray_cache_to_numpy_cache.py | 2 | 6677 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
import os
import sys
import tempfile
import numpy
from shutil import rmtree
from opus_core.tests import opus_unittest
from opus_core.store.file_flt_storage import file_flt_storage
from opus_core.opus_package import OpusPackage
from opus_upgrade.changes_2007_04_11.do_convert_numarray_cache_to_numpy_cache import ConvertNumarrayCacheToNumpyCache
class TestDoConvertNumarrayCacheToNumpyCache(opus_unittest.OpusTestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp(prefix='opus_tmp_test_do_convert_numarray_cache_to_numpy_cache')
package = OpusPackage()
opus_package_path = package.get_path_for_package('opus_upgrade')
self.root_path = os.path.join(opus_package_path, 'changes_2007_04_11')
self.test_data_path = os.path.join(self.root_path, 'test_data')
def tearDown(self):
if os.path.exists(self.temp_dir):
rmtree(self.temp_dir)
def test_convert_files(self):
numarray_file_path = os.path.join(self.test_data_path, 'numarray_inputs', 'do_not_change_me.sometext')
numpy_file_path = os.path.join(self.test_data_path, 'numpy_inputs', 'do_not_change_me.sometext')
output_directory = self.temp_dir
convert = ConvertNumarrayCacheToNumpyCache()
convert.convert_file(os.path.join(self.test_data_path, 'numarray_inputs'), 'do_not_change_me.sometext', output_directory)
self.assert_(os.path.exists(os.path.join(output_directory, 'do_not_change_me.sometext')))
endian = file_flt_storage.storage_file(None)._get_native_endian_file_extension_character()
convert.convert_file(os.path.join(self.test_data_path, 'numarray_inputs'), 'f.Int32', output_directory)
self.assert_(os.path.exists(os.path.join(output_directory, 'f.%si4' % endian)))
convert.convert_file(os.path.join(self.test_data_path, 'numarray_inputs'), 'd.Float32', output_directory)
self.assert_(os.path.exists(os.path.join(output_directory, 'd.%sf4' % endian)))
convert.convert_file(os.path.join(self.test_data_path, 'numarray_inputs'), 'c.txt', output_directory)
self.assert_(os.path.exists(os.path.join(output_directory, 'c.iS7')))
# Does the file contain the expected data?
f = open(os.path.join(output_directory, 'c.iS7'), 'rb')
actual = f.read()
f.close()
f = open(os.path.join(self.test_data_path, 'numpy_inputs', 'c.iS7'), 'rb')
expected = f.read()
f.close()
self.assertEqual(expected, actual)
def test_copy_entire_cache_using_object(self):
directory_to_copy = os.path.join(self.test_data_path, 'numarray_inputs')
numpy_directory_containing_expected_data = os.path.join(self.test_data_path, 'numpy_inputs')
output_directory = self.temp_dir
converter = ConvertNumarrayCacheToNumpyCache()
converter.execute(directory_to_copy, output_directory)
self.assert_(self._are_directories_same(numpy_directory_containing_expected_data, output_directory))
# Should notice that empty directory is gone.
rmtree(os.path.join(output_directory, 'a'))
self.assert_(not self._are_directories_same(numpy_directory_containing_expected_data, output_directory))
# Make them the same again
converter.execute(directory_to_copy, output_directory)
# Then change contents of one of the files.
numpy.array([100]).tofile(os.path.join(output_directory, 'f.li4'))
self.assert_(not self._are_directories_same(numpy_directory_containing_expected_data, output_directory))
# Make them the same again
converter.execute(directory_to_copy, output_directory)
# Add directory in output_directory
os.mkdir(os.path.join(output_directory, 'new_dir'))
self.assert_(not self._are_directories_same(numpy_directory_containing_expected_data, output_directory))
def _are_directories_same (self, first_path, second_path):
return (self._is_first_directory_subset_of_second(first_path, second_path) and
self._is_first_directory_subset_of_second(second_path, first_path))
def _is_first_directory_subset_of_second(self, first_path, second_path):
files_and_directories_to_ignore = ['CVS', '.svn']
def files_have_same_data(first_path, second_path):
f = open(first_path)
first = f.read()
f.close()
f = open(second_path)
second = f.read()
f.close()
return first == second
for file_or_dir_name in os.listdir(first_path):
if file_or_dir_name in files_and_directories_to_ignore:
continue
first_file_or_dir_path = os.path.join(first_path, file_or_dir_name)
second_file_or_dir_path = os.path.join(second_path, file_or_dir_name)
if not os.path.exists(second_file_or_dir_path):
return False
if os.path.isfile(first_file_or_dir_path):
if not files_have_same_data(first_file_or_dir_path, second_file_or_dir_path):
return False
else:
if not self._is_first_directory_subset_of_second(first_file_or_dir_path, second_file_or_dir_path):
return False
return True
def deactivted_test_copy_entire_cache_using_command_line(self):
directory_to_copy = os.path.join(self.test_data_path, 'numarray_inputs')
numpy_directory_containing_expected_data = os.path.join(self.test_data_path, 'numpy_inputs')
output_directory = self.temp_dir
command_file_path = os.path.join(self.root_path, 'do_convert_numarray_cache_to_numpy_cache.py')
cmd = '%s %s --cache_files_directory %s --output_directory %s' % (
sys.executable,
command_file_path,
directory_to_copy,
output_directory,
)
print cmd
result = os.system(cmd)
self.assertEqual(0, result)
# Check that output files have correct data.
self.assert_(self._are_directories_same(numpy_directory_containing_expected_data, output_directory))
if __name__ == '__main__':
opus_unittest.main() | gpl-2.0 | 3,211,731,888,894,946,000 | 44.381944 | 129 | 0.623034 | false |
gorcz/mercurial | mercurial/templatekw.py | 2 | 16005 |
# templatekw.py - common changeset template keywords
#
# Copyright 2005-2009 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex
import patch, util, error
import hbisect
# This helper class allows us to handle both:
# "{files}" (legacy command-line-specific list hack) and
# "{files % '{file}\n'}" (hgweb-style with inlining and function support)
# and to access raw values:
# "{ifcontains(file, files, ...)}", "{ifcontains(key, extras, ...)}"
# "{get(extras, key)}"
class _hybrid(object):
def __init__(self, gen, values, makemap, joinfmt=None):
self.gen = gen
self.values = values
self._makemap = makemap
if joinfmt:
self.joinfmt = joinfmt
else:
self.joinfmt = lambda x: x.values()[0]
def __iter__(self):
return self.gen
def __call__(self):
makemap = self._makemap
for x in self.values:
yield makemap(x)
def __contains__(self, x):
return x in self.values
def __len__(self):
return len(self.values)
def __getattr__(self, name):
if name != 'get':
raise AttributeError(name)
return getattr(self.values, name)
def showlist(name, values, plural=None, element=None, **args):
if not element:
element = name
f = _showlist(name, values, plural, **args)
return _hybrid(f, values, lambda x: {element: x})
def _showlist(name, values, plural=None, **args):
'''expand set of values.
name is name of key in template map.
values is list of strings or dicts.
plural is plural of name, if not simply name + 's'.
expansion works like this, given name 'foo'.
if values is empty, expand 'no_foos'.
if 'foo' not in template map, return values as a string,
joined by space.
expand 'start_foos'.
for each value, expand 'foo'. if 'last_foo' in template
map, expand it instead of 'foo' for last key.
expand 'end_foos'.
'''
templ = args['templ']
if plural:
names = plural
else: names = name + 's'
if not values:
noname = 'no_' + names
if noname in templ:
yield templ(noname, **args)
return
if name not in templ:
if isinstance(values[0], str):
yield ' '.join(values)
else:
for v in values:
yield dict(v, **args)
return
startname = 'start_' + names
if startname in templ:
yield templ(startname, **args)
vargs = args.copy()
def one(v, tag=name):
try:
vargs.update(v)
except (AttributeError, ValueError):
try:
for a, b in v:
vargs[a] = b
except ValueError:
vargs[name] = v
return templ(tag, **vargs)
lastname = 'last_' + name
if lastname in templ:
last = values.pop()
else:
last = None
for v in values:
yield one(v)
if last is not None:
yield one(last, tag=lastname)
endname = 'end_' + names
if endname in templ:
yield templ(endname, **args)
def getfiles(repo, ctx, revcache):
if 'files' not in revcache:
revcache['files'] = repo.status(ctx.p1(), ctx)[:3]
return revcache['files']
def getlatesttags(repo, ctx, cache):
'''return date, distance and name for the latest tag of rev'''
if 'latesttags' not in cache:
# Cache mapping from rev to a tuple with tag date, tag
# distance and tag name
cache['latesttags'] = {-1: (0, 0, 'null')}
latesttags = cache['latesttags']
rev = ctx.rev()
todo = [rev]
while todo:
rev = todo.pop()
if rev in latesttags:
continue
ctx = repo[rev]
tags = [t for t in ctx.tags()
if (repo.tagtype(t) and repo.tagtype(t) != 'local')]
if tags:
latesttags[rev] = ctx.date()[0], 0, ':'.join(sorted(tags))
continue
try:
# The tuples are laid out so the right one can be found by
# comparison.
pdate, pdist, ptag = max(
latesttags[p.rev()] for p in ctx.parents())
except KeyError:
# Cache miss - recurse
todo.append(rev)
todo.extend(p.rev() for p in ctx.parents())
continue
latesttags[rev] = pdate, pdist + 1, ptag
return latesttags[rev]
def getrenamedfn(repo, endrev=None):
rcache = {}
if endrev is None:
endrev = len(repo)
def getrenamed(fn, rev):
'''looks up all renames for a file (up to endrev) the first
time the file is given. It indexes on the changerev and only
parses the manifest if linkrev != changerev.
Returns rename info for fn at changerev rev.'''
if fn not in rcache:
rcache[fn] = {}
fl = repo.file(fn)
for i in fl:
lr = fl.linkrev(i)
renamed = fl.renamed(fl.node(i))
rcache[fn][lr] = renamed
if lr >= endrev:
break
if rev in rcache[fn]:
return rcache[fn][rev]
# If linkrev != rev (i.e. rev not found in rcache) fallback to
# filectx logic.
try:
return repo[rev][fn].renamed()
except error.LookupError:
return None
return getrenamed
def showauthor(repo, ctx, templ, **args):
""":author: String. The unmodified author of the changeset."""
return ctx.user()
def showbisect(repo, ctx, templ, **args):
""":bisect: String. The changeset bisection status."""
return hbisect.label(repo, ctx.node())
def showbranch(**args):
""":branch: String. The name of the branch on which the changeset was
committed.
"""
return args['ctx'].branch()
def showbranches(**args):
""":branches: List of strings. The name of the branch on which the
changeset was committed. Will be empty if the branch name was
default.
"""
branch = args['ctx'].branch()
if branch != 'default':
return showlist('branch', [branch], plural='branches', **args)
return showlist('branch', [], plural='branches', **args)
def showbookmarks(**args):
""":bookmarks: List of strings. Any bookmarks associated with the
changeset.
"""
repo = args['ctx']._repo
bookmarks = args['ctx'].bookmarks()
current = repo._bookmarkcurrent
makemap = lambda v: {'bookmark': v, 'current': current}
f = _showlist('bookmark', bookmarks, **args)
return _hybrid(f, bookmarks, makemap, lambda x: x['bookmark'])
def showchildren(**args):
""":children: List of strings. The children of the changeset."""
ctx = args['ctx']
childrevs = ['%d:%s' % (cctx, cctx) for cctx in ctx.children()]
return showlist('children', childrevs, element='child', **args)
def showcurrentbookmark(**args):
""":currentbookmark: String. The active bookmark, if it is
associated with the changeset"""
import bookmarks as bookmarks # to avoid circular import issues
repo = args['repo']
if bookmarks.iscurrent(repo):
current = repo._bookmarkcurrent
if current in args['ctx'].bookmarks():
return current
return ''
def showdate(repo, ctx, templ, **args):
""":date: Date information. The date when the changeset was committed."""
return ctx.date()
def showdescription(repo, ctx, templ, **args):
""":desc: String. The text of the changeset description."""
return ctx.description().strip()
def showdiffstat(repo, ctx, templ, **args):
""":diffstat: String. Statistics of changes with the following format:
"modified files: +added/-removed lines"
"""
stats = patch.diffstatdata(util.iterlines(ctx.diff()))
maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats)
return '%s: +%s/-%s' % (len(stats), adds, removes)
def showextras(**args):
""":extras: List of dicts with key, value entries of the 'extras'
field of this changeset."""
extras = args['ctx'].extra()
extras = util.sortdict((k, extras[k]) for k in sorted(extras))
makemap = lambda k: {'key': k, 'value': extras[k]}
c = [makemap(k) for k in extras]
f = _showlist('extra', c, plural='extras', **args)
return _hybrid(f, extras, makemap,
lambda x: '%s=%s' % (x['key'], x['value']))
def showfileadds(**args):
""":file_adds: List of strings. Files added by this changeset."""
repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
return showlist('file_add', getfiles(repo, ctx, revcache)[1],
element='file', **args)
def showfilecopies(**args):
""":file_copies: List of strings. Files copied in this changeset with
their sources.
"""
cache, ctx = args['cache'], args['ctx']
copies = args['revcache'].get('copies')
if copies is None:
if 'getrenamed' not in cache:
cache['getrenamed'] = getrenamedfn(args['repo'])
copies = []
getrenamed = cache['getrenamed']
for fn in ctx.files():
rename = getrenamed(fn, ctx.rev())
if rename:
copies.append((fn, rename[0]))
copies = util.sortdict(copies)
makemap = lambda k: {'name': k, 'source': copies[k]}
c = [makemap(k) for k in copies]
f = _showlist('file_copy', c, plural='file_copies', **args)
return _hybrid(f, copies, makemap,
lambda x: '%s (%s)' % (x['name'], x['source']))
# showfilecopiesswitch() displays file copies only if copy records are
# provided before calling the templater, usually with a --copies
# command line switch.
def showfilecopiesswitch(**args):
""":file_copies_switch: List of strings. Like "file_copies" but displayed
only if the --copied switch is set.
"""
copies = args['revcache'].get('copies') or []
copies = util.sortdict(copies)
makemap = lambda k: {'name': k, 'source': copies[k]}
c = [makemap(k) for k in copies]
f = _showlist('file_copy', c, plural='file_copies', **args)
return _hybrid(f, copies, makemap,
lambda x: '%s (%s)' % (x['name'], x['source']))
def showfiledels(**args):
""":file_dels: List of strings. Files removed by this changeset."""
repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
return showlist('file_del', getfiles(repo, ctx, revcache)[2],
element='file', **args)
def showfilemods(**args):
""":file_mods: List of strings. Files modified by this changeset."""
repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
return showlist('file_mod', getfiles(repo, ctx, revcache)[0],
element='file', **args)
def showfiles(**args):
""":files: List of strings. All files modified, added, or removed by this
changeset.
"""
return showlist('file', args['ctx'].files(), **args)
def showlatesttag(repo, ctx, templ, cache, **args):
""":latesttag: String. Most recent global tag in the ancestors of this
changeset.
"""
return getlatesttags(repo, ctx, cache)[2]
def showlatesttagdistance(repo, ctx, templ, cache, **args):
""":latesttagdistance: Integer. Longest path to the latest tag."""
return getlatesttags(repo, ctx, cache)[1]
def showmanifest(**args):
repo, ctx, templ = args['repo'], args['ctx'], args['templ']
mnode = ctx.manifestnode()
args = args.copy()
args.update({'rev': repo.manifest.rev(mnode), 'node': hex(mnode)})
return templ('manifest', **args)
def shownode(repo, ctx, templ, **args):
""":node: String. The changeset identification hash, as a 40 hexadecimal
digit string.
"""
return ctx.hex()
def showp1rev(repo, ctx, templ, **args):
""":p1rev: Integer. The repository-local revision number of the changeset's
first parent, or -1 if the changeset has no parents."""
return ctx.p1().rev()
def showp2rev(repo, ctx, templ, **args):
""":p2rev: Integer. The repository-local revision number of the changeset's
second parent, or -1 if the changeset has no second parent."""
return ctx.p2().rev()
def showp1node(repo, ctx, templ, **args):
""":p1node: String. The identification hash of the changeset's first parent,
as a 40 digit hexadecimal string. If the changeset has no parents, all
digits are 0."""
return ctx.p1().hex()
def showp2node(repo, ctx, templ, **args):
""":p2node: String. The identification hash of the changeset's second
parent, as a 40 digit hexadecimal string. If the changeset has no second
parent, all digits are 0."""
return ctx.p2().hex()
def showphase(repo, ctx, templ, **args):
""":phase: String. The changeset phase name."""
return ctx.phasestr()
def showphaseidx(repo, ctx, templ, **args):
""":phaseidx: Integer. The changeset phase index."""
return ctx.phase()
def showrev(repo, ctx, templ, **args):
""":rev: Integer. The repository-local changeset revision number."""
return ctx.rev()
def showsubrepos(**args):
""":subrepos: List of strings. Updated subrepositories in the changeset."""
ctx = args['ctx']
substate = ctx.substate
if not substate:
return showlist('subrepo', [], **args)
psubstate = ctx.parents()[0].substate or {}
subrepos = []
for sub in substate:
if sub not in psubstate or substate[sub] != psubstate[sub]:
subrepos.append(sub) # modified or newly added in ctx
for sub in psubstate:
if sub not in substate:
subrepos.append(sub) # removed in ctx
return showlist('subrepo', sorted(subrepos), **args)
def shownames(namespace, **args):
"""helper method to generate a template keyword for a namespace"""
ctx = args['ctx']
repo = ctx.repo()
ns = repo.names[namespace]
names = ns.names(repo, ctx.node())
return showlist(ns.templatename, names, plural=namespace, **args)
# don't remove "showtags" definition, even though namespaces will put
# a helper function for "tags" keyword into "keywords" map automatically,
# because online help text is built without namespaces initialization
def showtags(**args):
""":tags: List of strings. Any tags associated with the changeset."""
return shownames('tags', **args)
# keywords are callables like:
# fn(repo, ctx, templ, cache, revcache, **args)
# with:
# repo - current repository instance
# ctx - the changectx being displayed
# templ - the templater instance
# cache - a cache dictionary for the whole templater run
# revcache - a cache dictionary for the current revision
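# An illustrative keyword following that calling convention (hypothetical, not
# part of the original keyword set) could look like:
#
#   def showshorthash(repo, ctx, templ, **args):
#       """:shorthash: String. The abbreviated changeset hash."""
#       return str(ctx)
#
# and would be exposed by adding 'shorthash': showshorthash to the "keywords"
# map below.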
keywords = {
'author': showauthor,
'bisect': showbisect,
'branch': showbranch,
'branches': showbranches,
'bookmarks': showbookmarks,
'children': showchildren,
'currentbookmark': showcurrentbookmark,
'date': showdate,
'desc': showdescription,
'diffstat': showdiffstat,
'extras': showextras,
'file_adds': showfileadds,
'file_copies': showfilecopies,
'file_copies_switch': showfilecopiesswitch,
'file_dels': showfiledels,
'file_mods': showfilemods,
'files': showfiles,
'latesttag': showlatesttag,
'latesttagdistance': showlatesttagdistance,
'manifest': showmanifest,
'node': shownode,
'p1rev': showp1rev,
'p1node': showp1node,
'p2rev': showp2rev,
'p2node': showp2node,
'phase': showphase,
'phaseidx': showphaseidx,
'rev': showrev,
'subrepos': showsubrepos,
'tags': showtags,
}
def _showparents(**args):
""":parents: List of strings. The parents of the changeset in "rev:node"
format. If the changeset has only one "natural" parent (the predecessor
revision) nothing is shown."""
pass
dockeywords = {
'parents': _showparents,
}
dockeywords.update(keywords)
del dockeywords['branches']
# tell hggettext to extract docstrings from these functions:
i18nfunctions = dockeywords.values()
| gpl-2.0 | 4,792,943,687,673,580,000 | 33.345494 | 80 | 0.616182 | false |
virtualopensystems/neutron | neutron/agent/linux/external_process.py | 3 | 3671 | # Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('external_pids',
default='$state_path/external/pids',
help=_('Location to store child pid files')),
]
cfg.CONF.register_opts(OPTS)
class ProcessManager(object):
"""An external process manager for Neutron spawned processes.
Note: The manager expects uuid to be in cmdline.
"""
def __init__(self, conf, uuid, root_helper='sudo',
namespace=None, service=None):
self.conf = conf
self.uuid = uuid
self.root_helper = root_helper
self.namespace = namespace
if service:
self.service_pid_fname = 'pid.' + service
else:
self.service_pid_fname = 'pid'
def enable(self, cmd_callback, reload_cfg=False):
if not self.active:
cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True))
ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
ip_wrapper.netns.execute(cmd)
elif reload_cfg:
self.reload_cfg()
def reload_cfg(self):
self.disable('HUP')
def disable(self, sig='9'):
pid = self.pid
if self.active:
cmd = ['kill', '-%s' % (sig), pid]
utils.execute(cmd, self.root_helper)
# In the case of shutting down, remove the pid file
if sig == '9':
utils.remove_conf_file(self.conf.external_pids,
self.uuid,
self.service_pid_fname)
elif pid:
LOG.debug(_('Process for %(uuid)s pid %(pid)d is stale, ignoring '
'command'), {'uuid': self.uuid, 'pid': pid})
else:
LOG.debug(_('No process started for %s'), self.uuid)
def get_pid_file_name(self, ensure_pids_dir=False):
"""Returns the file name for a given kind of config file."""
return utils.get_conf_file_name(self.conf.external_pids,
self.uuid,
self.service_pid_fname,
ensure_pids_dir)
@property
def pid(self):
"""Last known pid for this external process spawned for this uuid."""
return utils.get_value_from_conf_file(self.conf.external_pids,
self.uuid,
self.service_pid_fname,
int)
@property
def active(self):
pid = self.pid
if pid is None:
return False
cmdline = '/proc/%s/cmdline' % pid
try:
with open(cmdline, "r") as f:
return self.uuid in f.readline()
except IOError:
return False
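# Illustrative usage sketch (names and the spawned command are hypothetical):
#
#   pm = ProcessManager(cfg.CONF, router_id, root_helper, namespace=ns_name)
#   pm.enable(lambda pid_file: ['dnsmasq', '--pid-file=%s' % pid_file])
#   ...
#   pm.disable()
#
# The callback passed to enable() receives the pid file path and returns the
# command list that is executed inside the namespace.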
| apache-2.0 | 2,475,695,555,353,490,000 | 33.961905 | 78 | 0.560065 | false |
sanjanalab/GUIDES | static/data/pre_processed/precompute_guides_msgpack_CFD+FD.py | 2 | 16590 | import msgpack
import json
import pickle
import os.path
from Queue import PriorityQueue
import re
import doench_score
import azimuth.model_comparison
import numpy as np
import pandas as pd
import csv
from intervaltree import IntervalTree
from multiprocessing import Process
import os
import time
start_time = time.time()
#Reverse complements a given string
def revcom(s):
basecomp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A','U':'A', 'N':'N'}
letters = list(s[::-1])
letters = [basecomp[base] for base in letters]
return ''.join(letters)
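# For example, revcom("AACG") returns "CGTT": the sequence is reversed and each
# base is replaced by its complement.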
class GuideRNA():
"""Holder of gRNA information"""
def __init__(self, selected, start, seq, PAM, seq_before, seq_after, chrom, cut_pos, score, exon_ranking, ensembl_gene, gene_name, functional_domain, has_exome_repeat, off_target_score):
self.start = start
self.seq = seq
self.PAM = PAM
self.seq_before = seq_before # 10bp before the sgRNA
self.seq_after = seq_after # 10bp after the sgRNA
self.chrom = chrom
self.cut_pos = cut_pos
self.score = score
self.exon_ranking = exon_ranking
self.ensembl_gene = ensembl_gene
self.gene_name = gene_name
self.selected = selected
self.functional_domain = functional_domain
if functional_domain:
self.has_functional_domain = True
else:
self.has_functional_domain = False
self.has_exome_repeat = has_exome_repeat
self.off_target_score = off_target_score
if off_target_score == 'inf':
self.off_target_score = 10000
def serialize_for_display(self):
"""Serialize for the way we are returning json"""
serialization = {
"score": self.score,
"start": self.start,
"seq": self.seq,
"PAM": self.PAM,
"seq_before": self.seq_before,
"seq_after": self.seq_after,
"chrom": self.chrom,
"cut_pos": self.cut_pos,
"selected": self.selected,
"has_exome_repeat": self.has_exome_repeat,
"off_target_score": self.off_target_score,
"has_functional_domain": self.has_functional_domain
}
if self.functional_domain != None:
serialization["functional_domain"] = self.functional_domain
return serialization
def cmp_scheme(self, g):
return (-g.off_target_score, g.has_functional_domain, g.score)
def __cmp__(self, other):
return cmp(self.cmp_scheme(self), self.cmp_scheme(other))
params = {
"PAM": "NGG",
"protospacer_len": 20,
"prime5": True,
"scoring": "Azimuth",
"quantity": 100,
"functional_domains": True,
"mer_len": 20
}
# azimuth model
print "loading azimuth models", time.time() - start_time
azimuth_saved_model_dir = os.path.join(os.path.dirname(azimuth.__file__), 'saved_models')
model_name = 'V3_model_full.pickle'
azimuth_model_file = os.path.join(azimuth_saved_model_dir, model_name)
with open(azimuth_model_file, 'rb') as f:
azimuth_model = pickle.load(f)
azimuth_scores_file = 'azimuth_scores.p'
with open(azimuth_scores_file, 'rb') as inp:
azimuth_scores = pickle.load(inp)
def get_azimuth_score(mer30):
if mer30 in azimuth_scores:
return azimuth_scores[mer30]
else:
score = azimuth.model_comparison.predict(np.array([mer30]), aa_cut=None, percent_peptide=None, model=azimuth_model, model_file=azimuth_model_file)[0]
print "generating Azimuth", mer30, score
azimuth_scores[mer30] = score
return score
# load in exome
APP_STATIC = "/home/joshm/GUIDES/CRISPR-Library-Designer/static"
exome_seq_path = os.path.join(APP_STATIC, 'data', 'GRCh37_exons')
mer_len = params['mer_len']
# process kmers
# consider all kmers which are followed by NGG
print "preparing hum kmers", time.time() - start_time
exome_mers = {}
for file in os.listdir(exome_seq_path):
file_loc = os.path.join(exome_seq_path, file)
with open(file_loc, 'r') as file_data:
fwdseq = file_data.read()
revseq = revcom(fwdseq)
for seq in [fwdseq, revseq]:
for i in range(len(seq) - mer_len - 2):
s = seq[i: i + mer_len]
if seq[i + mer_len + 1 : i + mer_len + 3] != "GG": # only PAMs
continue
if 'N' in s:
continue
if s in exome_mers:
exome_mers[s] += 1
else:
exome_mers[s] = 1
print 'len(exome_mers) = ', len(exome_mers), time.time() - start_time
# takes in guide OBJECT
# returns whether there is a duplicate in exome
def hasExomeRepeat(protospacer):
guide_seq = protospacer[-mer_len:] # get PAM-proximal mer_len bases
hits = exome_mers[guide_seq] # how many times does occur in genome followed by NGG?
return hits >= 2
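# In other words, a guide is flagged as an exome repeat when its PAM-proximal
# 20-mer occurs at least twice in the exome followed by an NGG PAM (counting
# both strands, as built into exome_mers above).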
# loading CFD preprocessed
#Unpickle mismatch scores and PAM scores
def get_mm_pam_scores():
try:
mm_scores = pickle.load(open('mismatch_score.pkl','rb'))
pam_scores = pickle.load(open('pam_scores.pkl','rb'))
return (mm_scores,pam_scores)
except:
raise Exception("Could not find file with mismatch scores or PAM scores")
#Calculates CFD score
def calc_cfd(wt,sg,pam):
mm_scores,pam_scores = get_mm_pam_scores()
score = 1
sg = sg.replace('T','U')
wt = wt.replace('T','U')
s_list = list(sg)
wt_list = list(wt)
for i,sl in enumerate(s_list):
if wt_list[i] == sl:
score*=1
else:
key = 'r'+wt_list[i]+':d'+revcom(sl)+','+str(i+1)
score*= mm_scores[key]
score*=pam_scores[pam]
return (score)
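# Illustrative call (sequences are hypothetical): given an intended target
# sequence wt, a 20-nt off-target protospacer sg and the 2-nt PAM suffix,
#   score = calc_cfd(wt, sg, "GG")
# multiplies the per-position mismatch scores with the PAM score; only the
# first len(sg) positions of wt are compared.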
def get_pot_off_targets(seq):
seq_list = list(seq)
backup_seq_list = list(seq)
nts = ['A','T','C','G']
results = {}
for a in range(len(seq)):
for a_sym in nts:
seq_list[a] = a_sym
for b in range(a + 1, len(seq)):
for b_sym in nts:
seq_list[b] = b_sym
for c in range(b + 1, len(seq)):
for c_sym in nts:
seq_list[c] = c_sym
new_seq = ''.join(seq_list)
results[new_seq] = True
seq_list[c] = backup_seq_list[c]
seq_list[b] = backup_seq_list[b]
seq_list[a] = backup_seq_list[a]
if seq in results:
del results[seq]
return results.keys()
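# Note: for a 20-mer this enumerates every sequence within Hamming distance 3
# of the input (20*3 + C(20,2)*9 + C(20,3)*27 = 32,550 sequences), excluding
# the input itself.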
# load preprocessed info
with open("off_target_scores.p", "rb") as inp:
off_target_scores = pickle.load(inp)
print 'len(off_target_scores) = ', len(off_target_scores), time.time() - start_time
def get_off_target_score(protospacer):
if hasExomeRepeat(protospacer):
return 100000
if not protospacer in off_target_scores:
score = 0
off_targets = get_pot_off_targets(protospacer)
for off_target in off_targets:
if off_target in exome_mers:
wt = protospacer + "CGG"
sg = off_target
pam = "GG"
score += exome_mers[off_target] * calc_cfd(wt, sg, pam)
off_target_scores[protospacer] = score
return off_target_scores[protospacer]
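# Summary of the scheme above: guides whose 20-mer repeats exactly in the exome
# get a large sentinel score (100000); otherwise the score is the sum, over
# every <=3-mismatch neighbour that occurs in the exome next to an NGG PAM, of
# (occurrence count) * (CFD score of that neighbour).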
# Create interval tree for functional domains
print "constructing interval tuples", time.time() - start_time
interval_tuples_dict = {}
ucsc_pfam_f = '../functional_domains/ucsc_pfam.txt'
with open(ucsc_pfam_f, 'r') as pfam_csv:
csvreader = csv.reader(pfam_csv, delimiter='\t')
next(csvreader) # skip header
for row in csvreader:
chrom = row[1]
start = row[2]
end = row[3]
name = row[4]
if chrom not in interval_tuples_dict:
interval_tuples_dict[chrom] = []
new_tuple = (int(start), int(end), name)
interval_tuples_dict[chrom].append(new_tuple)
print "constructing interval trees", time.time() - start_time
interval_trees_dict = {}
for k, v in interval_tuples_dict.iteritems():
interval_trees_dict[k] = IntervalTree.from_tuples(v)
modPAM = params["PAM"].upper()
modPAM = modPAM.replace('N', '[ATCG]')
params["modPAM"] = modPAM
params["PAM_len"] = len(params["PAM"])
revcompl = lambda x: ''.join([{'A':'T','C':'G','G':'C','T':'A','N':'N'}[B] for B in x][::-1])
print "constructing refGene", time.time() - start_time
refGeneFilename = '../gtex/refGene.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
refGene["exonStarts"] = refGene.apply(lambda x: x['exonStarts'].split(',')[:-1], axis=1)
refGene["exonEnds"] = refGene.apply(lambda x: x['exonEnds'].split(',')[:-1], axis=1)
refGene["exonFrames"] = refGene.apply(lambda x: x['exonFrames'].split(',')[:-1], axis=1)
def gene_exon_coords(gene, exon):
try:
start = list(refGene.loc[refGene['name'] == gene]['exonStarts'])[0][exon]
end = list(refGene.loc[refGene['name'] == gene]['exonEnds'])[0][exon]
chrom = list(refGene.loc[refGene['name'] == gene]['chrom'])[0]
return {
'start': int(start),
'end': int(end),
'chrom': str(chrom)
}
except IndexError:
return None
def gene_exon_file(gene, exon):
filename = gene + "_" + str(exon)
seq_path = os.path.join('../GRCh37_exons/', filename)
if os.path.isfile(seq_path):
with open(seq_path) as infile:
return infile.read()
else:
return None
with open("/home/joshm/GUIDES/CRISPR-Library-Designer/static/data/pre_processed/exon_info.p", "rb") as f:
exon_info = pickle.load(f)
def get_exon_start_chrom(gene, exon):
# get the row from the exon_info dataframe
row = exon_info[exon_info['name'] == gene].iloc[0]
# find where the exon starts
start = row['exonStarts'][exon]
# find the chromosome this falls in
chrom = str(row['chrom'])
if chrom.isdigit():
chrom = str(int(chrom)) # get rid of decimal place
return start, chrom
# this is run on multiprocessing workflow
def run(genes_list):
for gene in genes_list:
exon = 0
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
while seq:
# Check if we haven't done this in a previous run of the program
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
folder = '../cfdGRCh37_guides_msgpack_' + params["scoring"] + '/'
if params['functional_domains']:
folder = '../cfdGRCh37_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
if os.path.isfile(output_path):
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
continue
q = PriorityQueue()
def process_guide(m, selected, max_queue_size, seq, domain):
if 'N' in seq:
return
PAM_start = m.start()
score = 0
if params["scoring"] == "Doench":
# Doench score requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = doench_score.calc_score(mer30)
elif params["scoring"] == "Azimuth":
# Azimuth requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = get_azimuth_score(mer30)
protospacer = ""
PAM = ""
if params["prime5"]:
protospacer = seq[PAM_start-params["protospacer_len"]:PAM_start]
protospacer_before = seq[PAM_start-params["protospacer_len"]-10:PAM_start-params["protospacer_len"]]
protospacer_after = seq[PAM_start:PAM_start+10]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
else:
protospacer = seq[PAM_start+params["PAM_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]]
protospacer_before = seq[PAM_start+params["PAM_len"]-10:PAM_start+params["PAM_len"]]
protospacer_after = seq[PAM_start+params["PAM_len"]+params["protospacer_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]+10]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
if protospacer not in exome_mers:
print protospacer, 'NOT in exome_mers', gene["ensembl_id"], exon
print 'PAM is', seq[PAM_start:PAM_start+params["PAM_len"]]
has_exome_repeat = hasExomeRepeat(protospacer)
off_target_score = get_off_target_score(protospacer)
exon_start, chrom = get_exon_start_chrom(gene["ensembl_id"], exon)
cut_pos = exon_start + PAM_start
potential_gRNA = GuideRNA(selected, PAM_start-params["protospacer_len"], protospacer, PAM, protospacer_before, protospacer_after, chrom, cut_pos, score, exon, gene["ensembl_id"], gene["name"], domain, has_exome_repeat, off_target_score)
# If there's enough room, add it, no question.
if q.qsize() < max_queue_size:
q.put(potential_gRNA)
# Otherwise, take higher score
else:
lowest_gRNA = q.get()
if cmp(potential_gRNA, lowest_gRNA) == 1: # if potential_gRNA > lowest_gRNA
q.put(potential_gRNA)
else:
q.put(lowest_gRNA)
for m in re.finditer(params["modPAM"], seq):
if params["prime5"] and (m.start() < params["protospacer_len"] + 4 or m.start() + params["PAM_len"] + 6 > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG": # spCas9
cut_site = coords['start'] + m.start() - 3
chrom = 'chr' + coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq, domain)
seq_rc = revcompl(seq)
for m in re.finditer(params["modPAM"], seq_rc):
if params["prime5"] and (m.start() < params["protospacer_len"] + 4 or m.start() + params["PAM_len"] + 6 > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG": #spCas9
cut_site = coords['end'] - m.start() + 3
chrom = 'chr' + coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq_rc, domain)
# Pop gRNAs into our 'permanent' storage
gRNAs = []
while not q.empty():
gRNA = q.get()
gRNAs.append(gRNA.serialize_for_display())
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
folder = '../cfdGRCh37_guides_msgpack_' + params['scoring'] + '/'
if params['functional_domains']:
folder = '../cfdGRCh37_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
with open(output_path, 'w') as outfile:
# Reverse gRNAs list.
# Want highest on-target first.
msgpack.dump(gRNAs[::-1], outfile)
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
NUM_CORES = 16
print "beginning gene by gene processing", time.time() - start_time
with open('genes_list.json') as genes_list_file:
full_genes_list = json.load(genes_list_file)
# gene format: {"ensembl_id": "ENSG00000261122.2", "name": "5S_rRNA", "description": ""}
processes = []
unit = len(full_genes_list) / NUM_CORES + 1
print 'unit is', unit, time.time() - start_time
for i in range(NUM_CORES):
start = unit * i
end = min(unit * (i + 1), len(full_genes_list))
genes_list = full_genes_list[start:end]
p = Process(target = run, args=(genes_list,))
processes.append(p)
for process in processes:
process.start()
for process in processes:
process.join()
with open('azimuth_scores.p', 'wb') as output:
pickle.dump(azimuth_scores, output)
end_time = time.time()
hours, rem = divmod(end_time-start_time, 3600)
minutes, seconds = divmod(rem, 60)
print "time elapsed"
print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
| bsd-3-clause | -2,047,981,963,186,979,000 | 36.197309 | 244 | 0.620374 | false |
shodimaggio/SaivDr | appendix/pytorch/nsoltAtomExtension2dLayer.py | 1 | 4566 | import torch
import torch.nn as nn
import torch.autograd as autograd
from nsoltLayerExceptions import InvalidDirection, InvalidTargetChannels
class NsoltAtomExtension2dLayer(nn.Module):
"""
NSOLTATOMEXTENSION2DLAYER
    Input per component (only nComponents=1 is supported):
        nSamples x nRows x nCols x nChsTotal
    Output per component (only nComponents=1 is supported):
        nSamples x nRows x nCols x nChsTotal
Requirements: Python 3.7.x, PyTorch 1.7.x
Copyright (c) 2020-2021, Shogo MURAMATSU
All rights reserved.
Contact address: Shogo MURAMATSU,
Faculty of Engineering, Niigata University,
8050 2-no-cho Ikarashi, Nishi-ku,
Niigata, 950-2181, JAPAN
http://msiplab.eng.niigata-u.ac.jp/
"""
def __init__(self,
name='',
number_of_channels=[],
direction='',
target_channels=''):
super(NsoltAtomExtension2dLayer, self).__init__()
self.number_of_channels = number_of_channels
self.name = name
# Target channels
if target_channels in { 'Sum', 'Difference' }:
self.target_channels = target_channels
else:
raise InvalidTargetChannels(
'%s : Target should be either of Sum or Difference'\
                % target_channels
)
# Shift direction
if direction in { 'Right', 'Left', 'Down', 'Up' }:
self.direction = direction
else:
raise InvalidDirection(
'%s : Direction should be either of Right, Left, Down or Up'\
                % direction
)
# Description
self.description = direction \
+ " shift the " \
+ target_channels.lower() \
+ "-channel Coefs. " \
+ "(ps,pa) = (" \
+ str(number_of_channels[0]) + "," \
+ str(number_of_channels[1]) + ")"
self.type = ''
def forward(self,X):
# Number of channels
nchs = torch.tensor(self.number_of_channels,dtype=torch.int)
# Target channels
if self.target_channels == 'Difference':
target = torch.tensor((0,))
else:
target = torch.tensor((1,))
# Shift direction
if self.direction == 'Right':
shift = torch.tensor(( 0, 0, 1, 0 ))
elif self.direction == 'Left':
shift = torch.tensor(( 0, 0, -1, 0 ))
elif self.direction == 'Down':
shift = torch.tensor(( 0, 1, 0, 0 ))
else:
shift = torch.tensor(( 0, -1, 0, 0 ))
# Atom extension function
atomext = AtomExtension2d.apply
return atomext(X,nchs,target,shift)
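# Illustrative usage sketch (channel counts and tensor sizes are hypothetical):
#
#   layer = NsoltAtomExtension2dLayer(number_of_channels=[2, 2],
#                                     direction='Right',
#                                     target_channels='Difference')
#   Y = layer(X)   # X: nSamples x nRows x nCols x 4
#
# The difference half of the channels is shifted by one block to the right,
# sandwiched between the two block butterfly operations.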
class AtomExtension2d(autograd.Function):
@staticmethod
def forward(ctx, input, nchs, target, shift):
ctx.mark_non_differentiable(nchs,target,shift)
ctx.save_for_backward(nchs,target,shift)
# Block butterfly
X = block_butterfly(input,nchs)
# Block shift
X = block_shift(X,nchs,target,shift)
# Block butterfly
return block_butterfly(X,nchs)/2.
@staticmethod
def backward(ctx, grad_output):
nchs,target,shift = ctx.saved_tensors
grad_input = grad_nchs = grad_target = grad_shift = None
if ctx.needs_input_grad[0]:
# Block butterfly
X = block_butterfly(grad_output,nchs)
# Block shift
X = block_shift(X,nchs,target,-shift)
# Block butterfly
grad_input = block_butterfly(X,nchs)/2.
if ctx.needs_input_grad[1]:
grad_nchs = torch.zeros_like(nchs)
if ctx.needs_input_grad[2]:
grad_target = torch.zeros_like(target)
if ctx.needs_input_grad[3]:
grad_shift = torch.zeros_like(shift)
return grad_input, grad_nchs, grad_target, grad_shift
def block_butterfly(X,nchs):
"""
Block butterfly
"""
ps = nchs[0]
Xs = X[:,:,:,:ps]
Xa = X[:,:,:,ps:]
return torch.cat((Xs+Xa,Xs-Xa),dim=-1)
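# Note: the butterfly assumes ps == pa (equal numbers of symmetric and
# antisymmetric channels), since Xs and Xa must have the same shape to be
# added and subtracted.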
def block_shift(X,nchs,target,shift):
"""
Block shift
"""
ps = nchs[0]
if target == 0: # Difference channel
X[:,:,:,ps:] = torch.roll(X[:,:,:,ps:],shifts=tuple(shift.tolist()),dims=(0,1,2,3))
else: # Sum channel
X[:,:,:,:ps] = torch.roll(X[:,:,:,:ps],shifts=tuple(shift.tolist()),dims=(0,1,2,3))
return X | bsd-2-clause | 1,967,853,115,146,456,000 | 31.601449 | 91 | 0.545576 | false |
luckylavish/zamboni | mkt/api/tests/test_serializer.py | 19 | 3187 | # -*- coding: utf-8 -*-
from django.core.handlers.wsgi import WSGIRequest
from django.test import TestCase
from django.test.client import RequestFactory
import mock
from nose.tools import eq_, ok_
from rest_framework.serializers import Serializer, ValidationError
from mkt.api.serializers import PotatoCaptchaSerializer, URLSerializerMixin
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
class TestPotatoCaptchaSerializer(TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.request = mock.Mock()
self.request.META = {}
self.request.user = mock.Mock()
self.context = {'request': self.request}
self.request.user.is_authenticated = lambda: False
self.data = {'tuber': '', 'sprout': 'potato'}
def test_success_authenticated(self):
self.request.user = UserProfile.objects.get(id=999)
self.request.user.is_authenticated = lambda: True
serializer = PotatoCaptchaSerializer(data={}, context=self.context)
eq_(serializer.is_valid(), True)
def test_success_anonymous(self):
data = {'tuber': '', 'sprout': 'potato'}
serializer = PotatoCaptchaSerializer(data=data, context=self.context)
eq_(serializer.is_valid(), True)
def test_no_context(self):
data = {'tuber': '', 'sprout': 'potato'}
with self.assertRaises(ValidationError):
PotatoCaptchaSerializer(data=data)
def test_error_anonymous_bad_tuber(self):
data = {'tuber': 'HAMMMMMMMMMMMMM', 'sprout': 'potato'}
serializer = PotatoCaptchaSerializer(data=data, context=self.context)
eq_(serializer.is_valid(), False)
def test_error_anonymous_bad_sprout(self):
data = {'tuber': 'HAMMMMMMMMMMMMM', 'sprout': ''}
serializer = PotatoCaptchaSerializer(data=data, context=self.context)
eq_(serializer.is_valid(), False)
def test_error_anonymous_bad_tuber_and_sprout(self):
serializer = PotatoCaptchaSerializer(data={}, context=self.context)
eq_(serializer.is_valid(), False)
class TestURLSerializerMixin(TestCase):
SerializerClass = type('Potato', (URLSerializerMixin, Serializer),
{'Meta': None})
Struct = type('Struct', (object,), {})
url_basename = 'potato'
def setUp(self):
self.SerializerClass.Meta = type('Meta', (self.Struct,),
{'model': UserProfile,
'url_basename': self.url_basename})
self.request = RequestFactory().get('/')
self.request.API_VERSION = 1
self.serializer = self.SerializerClass(
context={'request': self.request})
self.obj = self.Struct()
self.obj.pk = 42
@mock.patch('mkt.api.serializers.reverse')
def test_get_url(self, mock_reverse):
self.serializer.get_url(self.obj)
reverse_args, reverse_kwargs = mock_reverse.call_args
ok_(mock_reverse.called)
eq_(reverse_args[0], '%s-detail' % self.url_basename)
eq_(type(reverse_kwargs['request']), WSGIRequest)
eq_(reverse_kwargs['kwargs']['pk'], self.obj.pk)
| bsd-3-clause | -6,630,730,032,490,068,000 | 38.345679 | 77 | 0.640728 | false |
memtoko/django | tests/pagination/tests.py | 60 | 13250 | from __future__ import unicode_literals
import unittest
from datetime import datetime
from django.core.paginator import (
EmptyPage, InvalidPage, PageNotAnInteger, Paginator,
)
from django.test import TestCase
from django.utils import six
from .custom import ValidAdjacentNumsPaginator
from .models import Article
class PaginationTests(unittest.TestCase):
"""
Tests for the Paginator and Page classes.
"""
def check_paginator(self, params, output):
"""
Helper method that instantiates a Paginator object from the passed
params and then checks that its attributes match the passed output.
"""
count, num_pages, page_range = output
paginator = Paginator(*params)
self.check_attribute('count', paginator, count, params)
self.check_attribute('num_pages', paginator, num_pages, params)
self.check_attribute('page_range', paginator, page_range, params, coerce=list)
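    # For example, Paginator([1, 2, 3, 4, 5], 2) has count == 5,
    # num_pages == 3 and page_range == [1, 2, 3].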
def check_attribute(self, name, paginator, expected, params, coerce=None):
"""
Helper method that checks a single attribute and gives a nice error
message upon test failure.
"""
got = getattr(paginator, name)
if coerce is not None:
got = coerce(got)
self.assertEqual(expected, got,
"For '%s', expected %s but got %s. Paginator parameters were: %s"
% (name, expected, got, params))
def test_paginator(self):
"""
Tests the paginator attributes using varying inputs.
"""
nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
ten = nine + [10]
eleven = ten + [11]
tests = (
# Each item is two tuples:
# First tuple is Paginator parameters - object_list, per_page,
# orphans, and allow_empty_first_page.
# Second tuple is resulting Paginator attributes - count,
# num_pages, and page_range.
# Ten items, varying orphans, no empty first page.
((ten, 4, 0, False), (10, 3, [1, 2, 3])),
((ten, 4, 1, False), (10, 3, [1, 2, 3])),
((ten, 4, 2, False), (10, 2, [1, 2])),
((ten, 4, 5, False), (10, 2, [1, 2])),
((ten, 4, 6, False), (10, 1, [1])),
# Ten items, varying orphans, allow empty first page.
((ten, 4, 0, True), (10, 3, [1, 2, 3])),
((ten, 4, 1, True), (10, 3, [1, 2, 3])),
((ten, 4, 2, True), (10, 2, [1, 2])),
((ten, 4, 5, True), (10, 2, [1, 2])),
((ten, 4, 6, True), (10, 1, [1])),
# One item, varying orphans, no empty first page.
(([1], 4, 0, False), (1, 1, [1])),
(([1], 4, 1, False), (1, 1, [1])),
(([1], 4, 2, False), (1, 1, [1])),
# One item, varying orphans, allow empty first page.
(([1], 4, 0, True), (1, 1, [1])),
(([1], 4, 1, True), (1, 1, [1])),
(([1], 4, 2, True), (1, 1, [1])),
# Zero items, varying orphans, no empty first page.
(([], 4, 0, False), (0, 0, [])),
(([], 4, 1, False), (0, 0, [])),
(([], 4, 2, False), (0, 0, [])),
# Zero items, varying orphans, allow empty first page.
(([], 4, 0, True), (0, 1, [1])),
(([], 4, 1, True), (0, 1, [1])),
(([], 4, 2, True), (0, 1, [1])),
            # Number of items one less than per_page.
(([], 1, 0, True), (0, 1, [1])),
(([], 1, 0, False), (0, 0, [])),
(([1], 2, 0, True), (1, 1, [1])),
((nine, 10, 0, True), (9, 1, [1])),
            # Number of items equal to per_page.
(([1], 1, 0, True), (1, 1, [1])),
(([1, 2], 2, 0, True), (2, 1, [1])),
((ten, 10, 0, True), (10, 1, [1])),
            # Number of items one more than per_page.
(([1, 2], 1, 0, True), (2, 2, [1, 2])),
(([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
((eleven, 10, 0, True), (11, 2, [1, 2])),
            # Number of items one more than per_page with one orphan.
(([1, 2], 1, 1, True), (2, 1, [1])),
(([1, 2, 3], 2, 1, True), (3, 1, [1])),
((eleven, 10, 1, True), (11, 1, [1])),
# Non-integer inputs
((ten, '4', 1, False), (10, 3, [1, 2, 3])),
((ten, '4', 1, False), (10, 3, [1, 2, 3])),
((ten, 4, '1', False), (10, 3, [1, 2, 3])),
((ten, 4, '1', False), (10, 3, [1, 2, 3])),
)
for params, output in tests:
self.check_paginator(params, output)
def test_invalid_page_number(self):
"""
Tests that invalid page numbers result in the correct exception being
raised.
"""
paginator = Paginator([1, 2, 3], 2)
self.assertRaises(InvalidPage, paginator.page, 3)
self.assertRaises(PageNotAnInteger, paginator.validate_number, None)
self.assertRaises(PageNotAnInteger, paginator.validate_number, 'x')
# With no content and allow_empty_first_page=True, 1 is a valid page number
paginator = Paginator([], 2)
self.assertEqual(paginator.validate_number(1), 1)
def test_paginate_misc_classes(self):
class CountContainer(object):
def count(self):
return 42
# Paginator can be passed other objects with a count() method.
paginator = Paginator(CountContainer(), 10)
self.assertEqual(42, paginator.count)
self.assertEqual(5, paginator.num_pages)
self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
# Paginator can be passed other objects that implement __len__.
class LenContainer(object):
def __len__(self):
return 42
paginator = Paginator(LenContainer(), 10)
self.assertEqual(42, paginator.count)
self.assertEqual(5, paginator.num_pages)
self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
def check_indexes(self, params, page_num, indexes):
"""
Helper method that instantiates a Paginator object from the passed
params and then checks that the start and end indexes of the passed
page_num match those given as a 2-tuple in indexes.
"""
paginator = Paginator(*params)
if page_num == 'first':
page_num = 1
elif page_num == 'last':
page_num = paginator.num_pages
page = paginator.page(page_num)
start, end = indexes
msg = ("For %s of page %s, expected %s but got %s."
" Paginator parameters were: %s")
self.assertEqual(start, page.start_index(),
msg % ('start index', page_num, start, page.start_index(), params))
self.assertEqual(end, page.end_index(),
msg % ('end index', page_num, end, page.end_index(), params))
def test_page_indexes(self):
"""
Tests that paginator pages have the correct start and end indexes.
"""
ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
tests = (
# Each item is three tuples:
# First tuple is Paginator parameters - object_list, per_page,
# orphans, and allow_empty_first_page.
# Second tuple is the start and end indexes of the first page.
# Third tuple is the start and end indexes of the last page.
# Ten items, varying per_page, no orphans.
((ten, 1, 0, True), (1, 1), (10, 10)),
((ten, 2, 0, True), (1, 2), (9, 10)),
((ten, 3, 0, True), (1, 3), (10, 10)),
((ten, 5, 0, True), (1, 5), (6, 10)),
# Ten items, varying per_page, with orphans.
((ten, 1, 1, True), (1, 1), (9, 10)),
((ten, 1, 2, True), (1, 1), (8, 10)),
((ten, 3, 1, True), (1, 3), (7, 10)),
((ten, 3, 2, True), (1, 3), (7, 10)),
((ten, 3, 4, True), (1, 3), (4, 10)),
((ten, 5, 1, True), (1, 5), (6, 10)),
((ten, 5, 2, True), (1, 5), (6, 10)),
((ten, 5, 5, True), (1, 10), (1, 10)),
# One item, varying orphans, no empty first page.
(([1], 4, 0, False), (1, 1), (1, 1)),
(([1], 4, 1, False), (1, 1), (1, 1)),
(([1], 4, 2, False), (1, 1), (1, 1)),
# One item, varying orphans, allow empty first page.
(([1], 4, 0, True), (1, 1), (1, 1)),
(([1], 4, 1, True), (1, 1), (1, 1)),
(([1], 4, 2, True), (1, 1), (1, 1)),
# Zero items, varying orphans, allow empty first page.
(([], 4, 0, True), (0, 0), (0, 0)),
(([], 4, 1, True), (0, 0), (0, 0)),
(([], 4, 2, True), (0, 0), (0, 0)),
)
for params, first, last in tests:
self.check_indexes(params, 'first', first)
self.check_indexes(params, 'last', last)
# When no items and no empty first page, we should get EmptyPage error.
self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 0, False), 1, None)
self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 1, False), 1, None)
self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 2, False), 1, None)
def test_page_sequence(self):
"""
Tests that a paginator page acts like a standard sequence.
"""
eleven = 'abcdefghijk'
page2 = Paginator(eleven, per_page=5, orphans=1).page(2)
self.assertEqual(len(page2), 6)
self.assertIn('k', page2)
self.assertNotIn('a', page2)
self.assertEqual(''.join(page2), 'fghijk')
self.assertEqual(''.join(reversed(page2)), 'kjihgf')
def test_get_page_hook(self):
"""
Tests that a Paginator subclass can use the ``_get_page`` hook to
return an alternative to the standard Page class.
"""
eleven = 'abcdefghijk'
paginator = ValidAdjacentNumsPaginator(eleven, per_page=6)
page1 = paginator.page(1)
page2 = paginator.page(2)
self.assertIsNone(page1.previous_page_number())
self.assertEqual(page1.next_page_number(), 2)
self.assertEqual(page2.previous_page_number(), 1)
self.assertIsNone(page2.next_page_number())
class ModelPaginationTests(TestCase):
"""
Test pagination with Django model instances
"""
def setUp(self):
# Prepare a list of objects for pagination.
for x in range(1, 10):
a = Article(headline='Article %s' % x, pub_date=datetime(2005, 7, 29))
a.save()
def test_first_page(self):
paginator = Paginator(Article.objects.all(), 5)
p = paginator.page(1)
self.assertEqual("<Page 1 of 2>", six.text_type(p))
self.assertQuerysetEqual(p.object_list, [
"<Article: Article 1>",
"<Article: Article 2>",
"<Article: Article 3>",
"<Article: Article 4>",
"<Article: Article 5>"
],
ordered=False
)
self.assertTrue(p.has_next())
self.assertFalse(p.has_previous())
self.assertTrue(p.has_other_pages())
self.assertEqual(2, p.next_page_number())
self.assertRaises(InvalidPage, p.previous_page_number)
self.assertEqual(1, p.start_index())
self.assertEqual(5, p.end_index())
def test_last_page(self):
paginator = Paginator(Article.objects.all(), 5)
p = paginator.page(2)
self.assertEqual("<Page 2 of 2>", six.text_type(p))
self.assertQuerysetEqual(p.object_list, [
"<Article: Article 6>",
"<Article: Article 7>",
"<Article: Article 8>",
"<Article: Article 9>"
],
ordered=False
)
self.assertFalse(p.has_next())
self.assertTrue(p.has_previous())
self.assertTrue(p.has_other_pages())
self.assertRaises(InvalidPage, p.next_page_number)
self.assertEqual(1, p.previous_page_number())
self.assertEqual(6, p.start_index())
self.assertEqual(9, p.end_index())
def test_page_getitem(self):
"""
Tests proper behavior of a paginator page __getitem__ (queryset
evaluation, slicing, exception raised).
"""
paginator = Paginator(Article.objects.all(), 5)
p = paginator.page(1)
# Make sure object_list queryset is not evaluated by an invalid __getitem__ call.
        # (this happens from the template engine when using e.g. {% if page_obj.has_previous %})
self.assertIsNone(p.object_list._result_cache)
self.assertRaises(TypeError, lambda: p['has_previous'])
self.assertIsNone(p.object_list._result_cache)
self.assertNotIsInstance(p.object_list, list)
# Make sure slicing the Page object with numbers and slice objects work.
self.assertEqual(p[0], Article.objects.get(headline='Article 1'))
self.assertQuerysetEqual(p[slice(2)], [
"<Article: Article 1>",
"<Article: Article 2>",
]
)
# After __getitem__ is called, object_list is a list
self.assertIsInstance(p.object_list, list)
| bsd-3-clause | -3,168,654,423,479,534,600 | 41.604502 | 92 | 0.522415 | false |
hujiajie/chromium-crosswalk | tools/grit/grit/format/policy_templates/writers/adm_writer_unittest.py | 41 | 31861 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.policy_templates.writers.adm_writer'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
import unittest
from grit.format.policy_templates.writers import writer_unittest_common
class AdmWriterUnittest(writer_unittest_common.WriterUnittestCommon):
'''Unit tests for AdmWriter.'''
def ConstructOutput(self, classes, body, strings):
result = []
for clazz in classes:
result.append('CLASS ' + clazz)
result.append(body)
result.append(strings)
return ''.join(result)
def CompareOutputs(self, output, expected_output):
'''Compares the output of the adm_writer with its expected output.
Args:
output: The output of the adm writer as returned by grit.
expected_output: The expected output.
Raises:
AssertionError: if the two strings are not equivalent.
'''
self.assertEquals(
output.strip(),
expected_output.strip().replace('\n', '\r\n'))
def testEmpty(self):
    # Test the ADM writer in case of empty policies.
grd = self.PrepareTest('''
{
'policy_definitions': [],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least "Windows 3.11', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_chromium': '1',}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!chromium
KEYNAME "Software\\Policies\\Chromium"
END CATEGORY
CATEGORY !!chromium_recommended
KEYNAME "Software\\Policies\\Chromium\\Recommended"
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least "Windows 3.11"
chromium="Chromium"
chromium_recommended="Chromium - Recommended"''')
self.CompareOutputs(output, expected_output)
def testVersionAnnotation(self):
    # Test that the product version is annotated in the ADM output.
grd = self.PrepareTest('''
{
'policy_definitions': [],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least "Windows 3.11', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(
grd, 'fr', {'_chromium': '1', 'version':'39.0.0.0'}, 'adm', 'en')
expected_output = '; chromium version: 39.0.0.0\n' + \
self.ConstructOutput(['MACHINE', 'USER'], '''
CATEGORY !!chromium
KEYNAME "Software\\Policies\\Chromium"
END CATEGORY
CATEGORY !!chromium_recommended
KEYNAME "Software\\Policies\\Chromium\\Recommended"
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least "Windows 3.11"
chromium="Chromium"
chromium_recommended="Chromium - Recommended"''')
self.CompareOutputs(output, expected_output)
def testMainPolicy(self):
# Tests a policy group with a single policy of type 'main'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'MainPolicy',
'type': 'main',
'supported_on': ['chrome.win:8-'],
'features': { 'can_be_recommended': True },
'caption': 'Caption of main.',
'desc': 'Description of main.',
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.12', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!google
CATEGORY !!googlechrome
KEYNAME "Software\\Policies\\Google\\Chrome"
POLICY !!MainPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!MainPolicy_Explain
VALUENAME "MainPolicy"
VALUEON NUMERIC 1
VALUEOFF NUMERIC 0
END POLICY
END CATEGORY
END CATEGORY
CATEGORY !!google
CATEGORY !!googlechrome_recommended
KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
POLICY !!MainPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!MainPolicy_Explain
VALUENAME "MainPolicy"
VALUEON NUMERIC 1
VALUEOFF NUMERIC 0
END POLICY
END CATEGORY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.12"
google="Google"
googlechrome="Google Chrome"
googlechrome_recommended="Google Chrome - Recommended"
MainPolicy_Policy="Caption of main."
MainPolicy_Explain="Description of main."''')
self.CompareOutputs(output, expected_output)
def testMainPolicyRecommendedOnly(self):
    # Tests a 'main' policy that can only be recommended, not made mandatory.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'MainPolicy',
'type': 'main',
'supported_on': ['chrome.win:8-'],
'features': {
'can_be_recommended': True,
'can_be_mandatory': False
},
'caption': 'Caption of main.',
'desc': 'Description of main.',
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.12', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!google
CATEGORY !!googlechrome
KEYNAME "Software\\Policies\\Google\\Chrome"
END CATEGORY
END CATEGORY
CATEGORY !!google
CATEGORY !!googlechrome_recommended
KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
POLICY !!MainPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!MainPolicy_Explain
VALUENAME "MainPolicy"
VALUEON NUMERIC 1
VALUEOFF NUMERIC 0
END POLICY
END CATEGORY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.12"
google="Google"
googlechrome="Google Chrome"
googlechrome_recommended="Google Chrome - Recommended"
MainPolicy_Policy="Caption of main."
MainPolicy_Explain="Description of main."''')
self.CompareOutputs(output, expected_output)
def testStringPolicy(self):
# Tests a policy group with a single policy of type 'string'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'StringPolicy',
'type': 'string',
'supported_on': ['chrome.win:8-'],
'features': { 'can_be_recommended': True },
'desc': """Description of group.
With a newline.""",
'caption': 'Caption of policy.',
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.13', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!chromium
KEYNAME "Software\\Policies\\Chromium"
POLICY !!StringPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!StringPolicy_Explain
PART !!StringPolicy_Part EDITTEXT
VALUENAME "StringPolicy"
MAXLEN 1000000
END PART
END POLICY
END CATEGORY
CATEGORY !!chromium_recommended
KEYNAME "Software\\Policies\\Chromium\\Recommended"
POLICY !!StringPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!StringPolicy_Explain
PART !!StringPolicy_Part EDITTEXT
VALUENAME "StringPolicy"
MAXLEN 1000000
END PART
END POLICY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.13"
chromium="Chromium"
chromium_recommended="Chromium - Recommended"
StringPolicy_Policy="Caption of policy."
StringPolicy_Explain="Description of group.\\nWith a newline."
StringPolicy_Part="Caption of policy."
''')
self.CompareOutputs(output, expected_output)
def testIntPolicy(self):
    # Tests a policy group with a single policy of type 'int'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'IntPolicy',
'type': 'int',
'caption': 'Caption of policy.',
'features': { 'can_be_recommended': True },
'desc': 'Description of policy.',
'supported_on': ['chrome.win:8-']
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.13', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!chromium
KEYNAME "Software\\Policies\\Chromium"
POLICY !!IntPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!IntPolicy_Explain
PART !!IntPolicy_Part NUMERIC
VALUENAME "IntPolicy"
MIN 0 MAX 2000000000
END PART
END POLICY
END CATEGORY
CATEGORY !!chromium_recommended
KEYNAME "Software\\Policies\\Chromium\\Recommended"
POLICY !!IntPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!IntPolicy_Explain
PART !!IntPolicy_Part NUMERIC
VALUENAME "IntPolicy"
MIN 0 MAX 2000000000
END PART
END POLICY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.13"
chromium="Chromium"
chromium_recommended="Chromium - Recommended"
IntPolicy_Policy="Caption of policy."
IntPolicy_Explain="Description of policy."
IntPolicy_Part="Caption of policy."
''')
self.CompareOutputs(output, expected_output)
def testIntEnumPolicy(self):
# Tests a policy group with a single policy of type 'int-enum'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'EnumPolicy',
'type': 'int-enum',
'items': [
{
'name': 'ProxyServerDisabled',
'value': 0,
'caption': 'Option1',
},
{
'name': 'ProxyServerAutoDetect',
'value': 1,
'caption': 'Option2',
},
],
'desc': 'Description of policy.',
'caption': 'Caption of policy.',
'supported_on': ['chrome.win:8-'],
'features': { 'can_be_recommended': True },
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.14', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!google
CATEGORY !!googlechrome
KEYNAME "Software\\Policies\\Google\\Chrome"
POLICY !!EnumPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!EnumPolicy_Explain
PART !!EnumPolicy_Part DROPDOWNLIST
VALUENAME "EnumPolicy"
ITEMLIST
NAME !!ProxyServerDisabled_DropDown VALUE NUMERIC 0
NAME !!ProxyServerAutoDetect_DropDown VALUE NUMERIC 1
END ITEMLIST
END PART
END POLICY
END CATEGORY
END CATEGORY
CATEGORY !!google
CATEGORY !!googlechrome_recommended
KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
POLICY !!EnumPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!EnumPolicy_Explain
PART !!EnumPolicy_Part DROPDOWNLIST
VALUENAME "EnumPolicy"
ITEMLIST
NAME !!ProxyServerDisabled_DropDown VALUE NUMERIC 0
NAME !!ProxyServerAutoDetect_DropDown VALUE NUMERIC 1
END ITEMLIST
END PART
END POLICY
END CATEGORY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.14"
google="Google"
googlechrome="Google Chrome"
googlechrome_recommended="Google Chrome - Recommended"
EnumPolicy_Policy="Caption of policy."
EnumPolicy_Explain="Description of policy."
EnumPolicy_Part="Caption of policy."
ProxyServerDisabled_DropDown="Option1"
ProxyServerAutoDetect_DropDown="Option2"
''')
self.CompareOutputs(output, expected_output)
def testStringEnumPolicy(self):
    # Tests a policy group with a single policy of type 'string-enum'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'EnumPolicy',
'type': 'string-enum',
'caption': 'Caption of policy.',
'desc': 'Description of policy.',
'items': [
{'name': 'ProxyServerDisabled', 'value': 'one',
'caption': 'Option1'},
{'name': 'ProxyServerAutoDetect', 'value': 'two',
'caption': 'Option2'},
],
'supported_on': ['chrome.win:8-'],
'features': { 'can_be_recommended': True },
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.14', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!google
CATEGORY !!googlechrome
KEYNAME "Software\\Policies\\Google\\Chrome"
POLICY !!EnumPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!EnumPolicy_Explain
PART !!EnumPolicy_Part DROPDOWNLIST
VALUENAME "EnumPolicy"
ITEMLIST
NAME !!ProxyServerDisabled_DropDown VALUE "one"
NAME !!ProxyServerAutoDetect_DropDown VALUE "two"
END ITEMLIST
END PART
END POLICY
END CATEGORY
END CATEGORY
CATEGORY !!google
CATEGORY !!googlechrome_recommended
KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
POLICY !!EnumPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!EnumPolicy_Explain
PART !!EnumPolicy_Part DROPDOWNLIST
VALUENAME "EnumPolicy"
ITEMLIST
NAME !!ProxyServerDisabled_DropDown VALUE "one"
NAME !!ProxyServerAutoDetect_DropDown VALUE "two"
END ITEMLIST
END PART
END POLICY
END CATEGORY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.14"
google="Google"
googlechrome="Google Chrome"
googlechrome_recommended="Google Chrome - Recommended"
EnumPolicy_Policy="Caption of policy."
EnumPolicy_Explain="Description of policy."
EnumPolicy_Part="Caption of policy."
ProxyServerDisabled_DropDown="Option1"
ProxyServerAutoDetect_DropDown="Option2"
''')
self.CompareOutputs(output, expected_output)
def testListPolicy(self):
# Tests a policy group with a single policy of type 'list'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'ListPolicy',
'type': 'list',
'supported_on': ['chrome.win:8-'],
'features': { 'can_be_recommended': True },
'desc': """Description of list policy.
With a newline.""",
'caption': 'Caption of list policy.',
'label': 'Label of list policy.'
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.15', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
},
}''')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!chromium
KEYNAME "Software\\Policies\\Chromium"
POLICY !!ListPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!ListPolicy_Explain
PART !!ListPolicy_Part LISTBOX
KEYNAME "Software\\Policies\\Chromium\\ListPolicy"
VALUEPREFIX ""
END PART
END POLICY
END CATEGORY
CATEGORY !!chromium_recommended
KEYNAME "Software\\Policies\\Chromium\\Recommended"
POLICY !!ListPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!ListPolicy_Explain
PART !!ListPolicy_Part LISTBOX
KEYNAME "Software\\Policies\\Chromium\\Recommended\\ListPolicy"
VALUEPREFIX ""
END PART
END POLICY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.15"
chromium="Chromium"
chromium_recommended="Chromium - Recommended"
ListPolicy_Policy="Caption of list policy."
ListPolicy_Explain="Description of list policy.\\nWith a newline."
ListPolicy_Part="Label of list policy."
''')
self.CompareOutputs(output, expected_output)
def testStringEnumListPolicy(self):
# Tests a policy group with a single policy of type 'string-enum-list'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'ListPolicy',
'type': 'string-enum-list',
'supported_on': ['chrome.win:8-'],
'features': { 'can_be_recommended': True },
'desc': """Description of list policy.
With a newline.""",
'items': [
{'name': 'ProxyServerDisabled', 'value': 'one',
'caption': 'Option1'},
{'name': 'ProxyServerAutoDetect', 'value': 'two',
'caption': 'Option2'},
],
'caption': 'Caption of list policy.',
'label': 'Label of list policy.'
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.15', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
},
}''')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!chromium
KEYNAME "Software\\Policies\\Chromium"
POLICY !!ListPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!ListPolicy_Explain
PART !!ListPolicy_Part LISTBOX
KEYNAME "Software\\Policies\\Chromium\\ListPolicy"
VALUEPREFIX ""
END PART
END POLICY
END CATEGORY
CATEGORY !!chromium_recommended
KEYNAME "Software\\Policies\\Chromium\\Recommended"
POLICY !!ListPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!ListPolicy_Explain
PART !!ListPolicy_Part LISTBOX
KEYNAME "Software\\Policies\\Chromium\\Recommended\\ListPolicy"
VALUEPREFIX ""
END PART
END POLICY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.15"
chromium="Chromium"
chromium_recommended="Chromium - Recommended"
ListPolicy_Policy="Caption of list policy."
ListPolicy_Explain="Description of list policy.\\nWith a newline."
ListPolicy_Part="Label of list policy."
''')
self.CompareOutputs(output, expected_output)
def testDictionaryPolicy(self):
# Tests a policy group with a single policy of type 'dict'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'DictionaryPolicy',
'type': 'dict',
'supported_on': ['chrome.win:8-'],
'features': { 'can_be_recommended': True },
'desc': 'Description of group.',
'caption': 'Caption of policy.',
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.13', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!chromium
KEYNAME "Software\\Policies\\Chromium"
POLICY !!DictionaryPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!DictionaryPolicy_Explain
PART !!DictionaryPolicy_Part EDITTEXT
VALUENAME "DictionaryPolicy"
MAXLEN 1000000
END PART
END POLICY
END CATEGORY
CATEGORY !!chromium_recommended
KEYNAME "Software\\Policies\\Chromium\\Recommended"
POLICY !!DictionaryPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!DictionaryPolicy_Explain
PART !!DictionaryPolicy_Part EDITTEXT
VALUENAME "DictionaryPolicy"
MAXLEN 1000000
END PART
END POLICY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.13"
chromium="Chromium"
chromium_recommended="Chromium - Recommended"
DictionaryPolicy_Policy="Caption of policy."
DictionaryPolicy_Explain="Description of group."
DictionaryPolicy_Part="Caption of policy."
''')
self.CompareOutputs(output, expected_output)
def testNonSupportedPolicy(self):
# Tests a policy that is not supported on Windows, so it shouldn't
# be included in the ADM file.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'NonWinGroup',
'type': 'group',
'policies': [{
'name': 'NonWinPolicy',
'type': 'list',
'supported_on': ['chrome.linux:8-', 'chrome.mac:8-'],
'caption': 'Caption of list policy.',
'desc': 'Desc of list policy.',
}],
'caption': 'Group caption.',
'desc': 'Group description.',
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.16', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!chromium
KEYNAME "Software\\Policies\\Chromium"
END CATEGORY
CATEGORY !!chromium_recommended
KEYNAME "Software\\Policies\\Chromium\\Recommended"
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.16"
chromium="Chromium"
chromium_recommended="Chromium - Recommended"
''')
self.CompareOutputs(output, expected_output)
def testNonRecommendedPolicy(self):
    # Tests a policy that cannot be recommended, so it should be included in
    # the mandatory section of the ADM output but not in the recommended one.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'MainPolicy',
'type': 'main',
'supported_on': ['chrome.win:8-'],
'caption': 'Caption of main.',
'desc': 'Description of main.',
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.12', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!google
CATEGORY !!googlechrome
KEYNAME "Software\\Policies\\Google\\Chrome"
POLICY !!MainPolicy_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!MainPolicy_Explain
VALUENAME "MainPolicy"
VALUEON NUMERIC 1
VALUEOFF NUMERIC 0
END POLICY
END CATEGORY
END CATEGORY
CATEGORY !!google
CATEGORY !!googlechrome_recommended
KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
END CATEGORY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.12"
google="Google"
googlechrome="Google Chrome"
googlechrome_recommended="Google Chrome - Recommended"
MainPolicy_Policy="Caption of main."
MainPolicy_Explain="Description of main."''')
self.CompareOutputs(output, expected_output)
def testPolicyGroup(self):
    # Tests a policy group that has more than one policy.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'Group1',
'type': 'group',
'desc': 'Description of group.',
'caption': 'Caption of group.',
'policies': [{
'name': 'Policy1',
'type': 'list',
'supported_on': ['chrome.win:8-'],
'features': { 'can_be_recommended': True },
'caption': 'Caption of policy1.',
'desc': """Description of policy1.
With a newline."""
},{
'name': 'Policy2',
'type': 'string',
'supported_on': ['chrome.win:8-'],
'caption': 'Caption of policy2.',
'desc': """Description of policy2.
With a newline."""
}],
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.16', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!chromium
KEYNAME "Software\\Policies\\Chromium"
CATEGORY !!Group1_Category
POLICY !!Policy1_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!Policy1_Explain
PART !!Policy1_Part LISTBOX
KEYNAME "Software\\Policies\\Chromium\\Policy1"
VALUEPREFIX ""
END PART
END POLICY
POLICY !!Policy2_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!Policy2_Explain
PART !!Policy2_Part EDITTEXT
VALUENAME "Policy2"
MAXLEN 1000000
END PART
END POLICY
END CATEGORY
END CATEGORY
CATEGORY !!chromium_recommended
KEYNAME "Software\\Policies\\Chromium\\Recommended"
CATEGORY !!Group1_Category
POLICY !!Policy1_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!Policy1_Explain
PART !!Policy1_Part LISTBOX
KEYNAME "Software\\Policies\\Chromium\\Recommended\\Policy1"
VALUEPREFIX ""
END PART
END POLICY
END CATEGORY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.16"
chromium="Chromium"
chromium_recommended="Chromium - Recommended"
Group1_Category="Caption of group."
Policy1_Policy="Caption of policy1."
Policy1_Explain="Description of policy1.\\nWith a newline."
Policy1_Part="Caption of policy1."
Policy2_Policy="Caption of policy2."
Policy2_Explain="Description of policy2.\\nWith a newline."
Policy2_Part="Caption of policy2."
''')
self.CompareOutputs(output, expected_output)
def testDuplicatedStringEnumPolicy(self):
# Verifies that duplicated enum constants get merged, and that
# string constants get escaped.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'EnumPolicy.A',
'type': 'string-enum',
'caption': 'Caption of policy A.',
'desc': 'Description of policy A.',
'items': [
{'name': 'tls1.2', 'value': 'tls1.2', 'caption': 'tls1.2' },
],
'supported_on': ['chrome.win:39-'],
},
{
'name': 'EnumPolicy.B',
'type': 'string-enum',
'caption': 'Caption of policy B.',
'desc': 'Description of policy B.',
'items': [
{'name': 'tls1.2', 'value': 'tls1.2', 'caption': 'tls1.2' },
],
'supported_on': ['chrome.win:39-'],
},
],
'placeholders': [],
'messages': {
'win_supported_winxpsp2': {
'text': 'At least Windows 3.14', 'desc': 'blah'
},
'doc_recommended': {
'text': 'Recommended', 'desc': 'bleh'
}
}
}''')
output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'adm', 'en')
expected_output = self.ConstructOutput(
['MACHINE', 'USER'], '''
CATEGORY !!google
CATEGORY !!googlechrome
KEYNAME "Software\\Policies\\Google\\Chrome"
POLICY !!EnumPolicy_A_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!EnumPolicy_A_Explain
PART !!EnumPolicy_A_Part DROPDOWNLIST
VALUENAME "EnumPolicy.A"
ITEMLIST
NAME !!tls1_2_DropDown VALUE "tls1.2"
END ITEMLIST
END PART
END POLICY
POLICY !!EnumPolicy_B_Policy
#if version >= 4
SUPPORTED !!SUPPORTED_WINXPSP2
#endif
EXPLAIN !!EnumPolicy_B_Explain
PART !!EnumPolicy_B_Part DROPDOWNLIST
VALUENAME "EnumPolicy.B"
ITEMLIST
NAME !!tls1_2_DropDown VALUE "tls1.2"
END ITEMLIST
END PART
END POLICY
END CATEGORY
END CATEGORY
CATEGORY !!google
CATEGORY !!googlechrome_recommended
KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
END CATEGORY
END CATEGORY
''', '''[Strings]
SUPPORTED_WINXPSP2="At least Windows 3.14"
google="Google"
googlechrome="Google Chrome"
googlechrome_recommended="Google Chrome - Recommended"
EnumPolicy_A_Policy="Caption of policy A."
EnumPolicy_A_Explain="Description of policy A."
EnumPolicy_A_Part="Caption of policy A."
tls1_2_DropDown="tls1.2"
EnumPolicy_B_Policy="Caption of policy B."
EnumPolicy_B_Explain="Description of policy B."
EnumPolicy_B_Part="Caption of policy B."
''')
self.CompareOutputs(output, expected_output)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 7,711,468,013,090,224,000 | 27.27063 | 77 | 0.570541 | false |
nabobalis/pyhht | pyhht/EMD.py | 1 | 8737 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from scipy import interpolate
from scipy.stats import pearsonr
__all__ = ['emd']
def exterma(data, extenstion='extrema', n=2):
"""
    Takes a 1-D array and finds its extrema points.
"""
N = data.shape[0]
min_env = np.zeros(N)
max_env = min_env.copy()
min_env = np.logical_and(
np.r_[True, data[1:] < data[:-1]],
np.r_[data[:-1] < data[1:], True])
max_env = np.logical_and(
np.r_[True, data[1:] > data[:-1]],
np.r_[data[:-1] > data[1:], True])
max_env[-1] = min_env[0] = False
min_env = min_env.nonzero()[0]
max_env = max_env.nonzero()[0]
data_min = data[min_env]
data_max = data[max_env]
min_arr = np.array([min_env, data_min])
max_arr = np.array([max_env, data_max])
if min_env.shape[0] <= 2 or max_env.shape[0] <= 2:
#If this IMF has become a straight line
pass
elif extenstion == 'extrema':
left_min = np.zeros([2,n])
right_min = np.zeros([2, n])
left_max = np.zeros([2,n])
right_max = np.zeros([2, n])
for i in range(1, n+1):
left_max[:, i-1] = [-1*min_env[n-i], data_max[n-i]]
left_min[:, i-1] = [-1*max_env[n-i], data_min[n-i]]
right_max[:, i-1] = [2*N - min_env[-i], data_max[-i]]
right_min[:, i-1] = [2*N - max_env[-i], data_min[-i]]
min_arr = np.concatenate([left_min, min_arr, right_min], axis=1)
max_arr = np.concatenate([left_max, max_arr, right_max], axis=1)
else:
min_arr = np.array([min_env, data_min])
max_arr = np.array([max_env, data_max])
return min_arr, max_arr
def envelope(min_arr, max_arr, N, periodic=0):
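    """
    Spline-interpolate the minima and maxima arrays over range(N) and return
    the mean of the two resulting envelopes.
    """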
#Cubic Spline by default
order_max = 3
order_min = 3
min_arr = np.asarray(min_arr)
max_arr = np.asarray(max_arr)
if min_arr.shape[1] < 4:
order_min = 1 #Do linear interpolation if not enough points
elif min_arr.shape[1] < 5:
order_min = 2 #Do quad interpolation if not enough points
else:
order_min = 3
if max_arr.shape[1] < 4:
order_max = 1 #Do linear interpolation if not enough points
elif max_arr.shape[1] < 5:
order_max = 2 #Do quad interpolation if not enough points
else:
order_max = 3
# Mirror Method requires per flag = 1
# No extrapolation requires per flag = 0
t = interpolate.splrep(*min_arr, k=order_min, per=periodic)
top = interpolate.splev(np.arange(N), t)
b = interpolate.splrep(*max_arr, k=order_max, per=periodic)
bot = interpolate.splev(np.arange(N), b)
mean = (top + bot)/2
return mean
def emd(data, nimfs=12, extrapolation='mirror', n=2,
shifting_distance=0.2, pearson=True):
"""
    Perform an Empirical Mode Decomposition on a data set.
    This function will return an array of all the Intrinsic Mode Functions as
    defined in [1]_, which can be used for further Hilbert Spectral Analysis.
    The EMD uses a spline interpolation function to approximate the upper and
    lower envelopes of the signal; this routine implements an extrapolation
    routine as described in [2]_ as well as the standard spline routine.
    The extrapolation method removes the artifacts introduced by the spline fit
    at the ends of the data set, by making the dataset a continuous circle.
    Many thousands of papers have been published with ideas to improve the EMD
    process. One such paper is [3]_, which is used for the extrema mirror.
Parameters
----------
data : array_like
Signal Data 1-D array.
extrapolation : str, optional
Sets the extrapolation method for edge effects.
Options: None
'mirror'
'extrema'
Default: 'mirror'
n: int, optional
        Sets the number of points used for the extrema mirror method.
nimfs : int, optional
Sets the maximum number of IMFs to be found
Default : 12
stopping: string, optional,
Sets the method used to stop the sifting process.
None: Standard EMD equation .....
TBA1: Second standard EMD equation ....
        resolution: comes from ref [3]_. Needs the parameter res to be set!
    shifting_distance : float, optional
Sets the minimum variance between IMF iterations.
Default : 0.2
res : float, optional
stuff from ref [3]_ it is in dB
Returns
-------
IMFs : ndarray
An array of shape (len(data),N) where N is the number of found IMFs
Notes
-----
References
----------
.. [1] Huang H. et al. 1998 'The empirical mode decomposition and the
Hilbert spectrum for nonlinear and non-stationary time series analysis.'
    Proceedings of the Royal Society 454, 903-995
.. [2] Zhao J., Huang D. 2001 'Mirror extending and circular spline
function for empirical mode decomposition method'
Journal of Zhejiang University (Science) V.2, No.3,P247-252
.. [3] Rato R.T., Ortigueira M.D., Batista A.G 2008 'On the HHT,
its problems, and some solutions.'
Mechanical Systems and Signal Processing 22 1374-1394
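    Examples
    --------
    A minimal usage sketch; the synthetic two-tone signal below is only an
    illustration and is not part of this module::

        import numpy as np

        t = np.linspace(0, 1, 1000)
        data = np.sin(2 * np.pi * 5 * t) + 0.5 * np.sin(2 * np.pi * 25 * t)
        imfs = emd(data, nimfs=6, extrapolation='mirror')
        # imfs.shape == (len(data), number of IMFs found)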
"""
#Set up signals array and IMFs array based on type of extrapolation
# No extrapolation and 'extend' use signals array which is len(data)
# Mirror extrapolation (Zhao 2001) uses a signal array len(2*data)
if extrapolation == 'mirror':
#Set up base
base = len(data)
nimfs = range(nimfs) # Max number of IMFs
IMFs = np.zeros([base, len(nimfs)])
ncomp = 0
residual = data
#Signals is 2*base
signals = np.zeros([base*2, 2])
#Mirror Dataset
signals[0:base / 2, 0] = data[::-1][base / 2:]
signals[base / 2:base + base / 2, 0] = data
signals[base + base / 2:base * 2, 0] = data[::-1][0:base / 2]
        # Redefine base as len(signals) for IMFs
base = len(signals)
data_length = len(data) # Data length is used in recovering input data
#Do spline fitting with periodic bounds
periodic = 1
else:
base = len(data)
signals = np.zeros([base, 2])
nimfs = range(nimfs) # Max number of IMFs
IMFs = np.zeros([base, len(nimfs)])
ncomp = 0
residual = data
signals[:, 0] = data
        if extrapolation == 'extrema':
periodic = 1
else:
#Don't do spline fitting with periodic bounds
periodic = 0
for j in nimfs:
        # Extract at most nimfs IMFs; no more IMFs can be found once finish is True
k = 0
sd = 1
finish = False
while sd > shifting_distance and not(finish):
#EMD magic here.
min_arr, max_arr = exterma(signals[:,0])
if min_arr.shape[1] <= 2 or max_arr.shape[1] <= 2:
#If this IMF has become a straight line
finish = True
else:
mean = envelope(min_arr, max_arr, base, periodic)
if not(pearson):
alpha = 1
else:
alpha = pearsonr(signals[:,0],mean)[0]
signals[:,1] = signals[:,0] - alpha*mean
#Calculate the shifting distance which is a measure of
                #similarity to the previous IMF
if k > 0:
sd = np.sum((np.abs(signals[:,0] - signals[:,1])**2)) / np.sum(signals[:,0]**2)
signals = signals[:,::-1]
k += 1
if finish:
#If IMF is a straight line we are done here.
IMFs[:,j]= residual
ncomp += 1
break
elif extrapolation == 'mirror':
IMFs[:,j] = signals[data_length / 2:data_length
+ data_length / 2,0]
residual = residual - IMFs[:,j]#For j==0 residual is initially data
#Mirror case requires IMF subtraction from data range then
# re-mirroring for each IMF
signals[0:data_length / 2,0] = residual[::-1][data_length / 2:]
signals[data_length / 2:data_length + data_length / 2,0] = residual
signals[data_length
+ data_length / 2:,0] = residual[::-1][0:data_length / 2]
ncomp += 1
else:
IMFs[:,j] = signals[:,0]
residual = residual - IMFs[:,j]#For j==0 residual is initially data
signals[:,0] = residual
ncomp += 1
return IMFs[:, 0:ncomp] | bsd-2-clause | -2,863,552,198,583,403,500 | 34.811475 | 103 | 0.563008 | false |
Viele/onionSkinRenderer | 2017_2018/onionSkinRenderer/controller.py | 1 | 24018 | import pymel.core as pm
import maya.cmds as cmds
import os
import json
import inspect
from PySide2 import QtWidgets, QtCore
import maya.OpenMayaUI as omui
from shiboken2 import wrapInstance
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
import onionSkinRenderer.core as core
import onionSkinRenderer.ui_window as ui_window
import onionSkinRenderer.wdgt_Frame as wdgt_Frame
import onionSkinRenderer.wdgt_MeshListObj as wdgt_MeshListObj
import onionSkinRenderer.wdgt_Preferences as wdgt_Preferences
import onionSkinRenderer.core_clearRender as clearRender
import onionSkinRenderer.core_hudRender as hudRender
import onionSkinRenderer.core_presentTarget as presentTarget
import onionSkinRenderer.core_quadRender as quadRender
import onionSkinRenderer.core_sceneRender as sceneRender
'''
2017, 2018 and 2019 Version
using pyside2
'''
'''
Naming Conventions:
    Global variables: are in caps, separated by "_"
os: abbreviation for onion skin
osr: abbreviation for onion skin renderer
'''
DEBUG_ALL = False
# wrapper to get mayas main window
def getMayaMainWindow():
mayaPtr = omui.MQtUtil.mainWindow()
return wrapInstance(long(mayaPtr), QtWidgets.QWidget)
# global variable holding the instance of the window
OSR_WINDOW = None
# convenient function to open the osr ui
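# e.g. from the Maya script editor (module path assumed from this package layout):
#   import onionSkinRenderer.controller as controller
#   controller.show()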
def show(develop = False, dockable = False):
    # keep the module-level OSR_WINDOW reference up to date
    global OSR_WINDOW
if develop:
reload(core)
reload(clearRender)
reload(hudRender)
reload(presentTarget)
reload(quadRender)
reload(sceneRender)
reload(wdgt_Frame)
reload(ui_window)
reload(wdgt_MeshListObj)
reload(wdgt_Preferences)
try:
OSR_WINDOW.close()
except:
pass
OSR_WINDOW = OSRController()
# if somebody reads this because they want to make it dockable
# please contact me. I'd like to have it dockable as well
# but it just never works
OSR_WINDOW.show(dockable = False)
'''
ONION SKIN RENDERER MAIN UI
This class is the main ui window. It manages all user events and links to the core
'''
class OSRController(MayaQWidgetDockableMixin, QtWidgets.QMainWindow, ui_window.Ui_onionSkinRenderer):
#
def __init__(self, parent = getMayaMainWindow()):
super(OSRController, self).__init__(parent)
# the dockable feature creates this control that needs to be deleted manually
# otherwise it throws an error that this name already exists
self.deleteControl('onionSkinRendererWorkspaceControl')
# This registers the override in maya
# I previously had it as plugin, but this made it impossible to get
# the OSR_INSTANCE (sth to do with python namespaces i guess)
# so i just call init myself.
# It feels a bit hacky, but it works anyway
core.initializeOverride()
# member variables
self.targetObjectsSet = set()
self.absoluteFramesSet = set()
self.preferences = {}
self.relativeFrameCount = 8
self.toolPath = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
self.activeEditor = None
# create the ui from the compiled qt designer file
self.setupUi(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.createConnections()
# load settings from the settings file
self.loadSettings()
#
def closeEvent(self, event):
# when the UI is closed, deactivate the override
if DEBUG_ALL: print 'close event start'
self.saveSettings()
core.uninitializeOverride()
if DEBUG_ALL: print 'close event end'
# special event for the dockable feature
def dockCloseEventTriggered(self, event):
if DEBUG_ALL: print 'dock close event start'
self.saveSettings()
core.uninitializeOverride()
if DEBUG_ALL: print 'dock close event end'
# code from https://gist.github.com/liorbenhorin/217bfb7e54c6f75b9b1b2b3d73a1a43a
def deleteControl(self, control):
if DEBUG_ALL: print 'delete Control'
if cmds.workspaceControl(control, q=True, exists=True):
cmds.workspaceControl(control, e=True, close=True)
cmds.deleteUI(control, control=True)
#
def createConnections(self):
self.targetObjects_add_btn.clicked.connect(self.addSelectedToTargetObjects)
self.targetObjects_remove_btn.clicked.connect(self.removeSelectedFromTargetObjects)
self.targetObjects_clear_btn.clicked.connect(self.clearTargetObjects)
self.toggleRenderer_btn.clicked.connect(self.toggleRenderer)
self.globalOpacity_slider.sliderMoved.connect(self.setGlobalOpacity)
self.onionType_cBox.currentTextChanged.connect(self.setOnionSkinDisplayMode)
self.drawBehind_chkBx.stateChanged.connect(self.setDrawBehind)
self.tint_type_cBox.currentTextChanged.connect(self.setTintType)
self.relative_futureTint_btn.clicked.connect(self.pickColor)
self.relative_pastTint_btn.clicked.connect(self.pickColor)
self.relative_tint_strength_slider.sliderMoved.connect(self.setTintStrength)
self.relative_keyframes_chkbx.clicked.connect(self.toggleRelativeKeyframeDisplay)
self.relative_step_spinBox.valueChanged.connect(self.setRelativeStep)
self.absolute_tint_btn.clicked.connect(self.pickColor)
self.absolute_addCrnt_btn.clicked.connect(self.addAbsoluteTargetFrame)
self.absolute_add_btn.clicked.connect(self.addAbsoluteTargetFrameFromSpinbox)
self.absolute_clear_btn.clicked.connect(self.clearAbsoluteTargetFrames)
self.settings_clearBuffer.triggered.connect(self.clearBuffer)
self.settings_autoClearBuffer.triggered.connect(self.setAutoClearBuffer)
self.settings_preferences.triggered.connect(self.changePrefs)
self.settings_saveSettings.triggered.connect(self.saveSettings)
self.targetObjects_grp.clicked.connect(self.toggleGroupBox)
self.onionSkinFrames_grp.clicked.connect(self.toggleGroupBox)
self.onionSkinSettings_grp.clicked.connect(self.toggleGroupBox)
# ------------------
# UI REFRESH
#
def refreshObjectList(self):
self.targetObjects_list.clear()
for obj in self.targetObjectsSet:
listWidget = TargetObjectListWidget()
listWidget.object_label.setText(obj.nodeName())
listWidget.object_remove_btn.clicked.connect(lambda b_obj = obj: self.removeTargetObject(b_obj))
listItem = QtWidgets.QListWidgetItem()
listItem.setSizeHint(listWidget.sizeHint())
self.targetObjects_list.addItem(listItem)
self.targetObjects_list.setItemWidget(listItem, listWidget)
#
def refreshRelativeFrame(self):
activeFrames = []
# clear the frame of all widgets first
for child in self.relative_frame.findChildren(OnionListFrame):
if child.frame_visibility_btn.isChecked():
activeFrames.append(int(child.frame_number.text()))
child.setParent(None)
# fill the relative frames list
for index in range(self.relativeFrameCount + 1):
if not index-self.relativeFrameCount/2 == 0:
listWidget = OnionListFrame()
frame = index-self.relativeFrameCount/2
listWidget.frame_number.setText(str(frame))
listWidget.frame_opacity_slider.setValue(75/abs(index-self.relativeFrameCount/2))
listWidget.frame_visibility_btn.toggled.connect(self.toggleRelativeTargetFrame)
if frame in activeFrames:
listWidget.frame_visibility_btn.setChecked(True)
activeFrames.remove(frame)
listWidget.frame_opacity_slider.sliderMoved.connect(self.setOpacityForRelativeTargetFrame)
self.relative_frame_layout.addWidget(listWidget)
# remove all remaining frames from onion skin renderer
        # since their visibility is no longer accessible from the ui
for frame in activeFrames:
core.OSR_INSTANCE.removeRelativeOnion(frame)
#
def refreshAbsoluteFrameTargetsList(self):
# remove any entries that don't exist anymore
framesInList = []
for i in reversed(xrange(self.absolute_list.count())):
frame = self.absolute_list.item(i).data(QtCore.Qt.UserRole)
framesInList.append(frame)
if frame not in self.absoluteFramesSet:
self.absolute_list.takeItem(i)
# add any missing entry
for frame in self.absoluteFramesSet:
if frame not in framesInList:
listWidget = OnionListFrame()
listWidget.frame_number.setText(str(int(frame)))
listWidget.frame_opacity_slider.setValue(core.OSR_INSTANCE.getOpacityOfAbsoluteFrame(int(frame)))
listWidget.addRemoveButton()
listWidget.frame_visibility_btn.setChecked(core.OSR_INSTANCE.absoluteTargetFrameExists(int(frame)))
listWidget.frame_remove_btn.clicked.connect(lambda b_frame = frame: self.removeAbsoluteTargetFrame(b_frame))
listWidget.frame_visibility_btn.toggled.connect(self.toggleAbsoluteTargetFrame)
listWidget.frame_opacity_slider.sliderMoved.connect(self.setOpacityForAbsoluteTargetFrame)
listItem = QtWidgets.QListWidgetItem()
listItem.setData(QtCore.Qt.UserRole, int(frame))
listItem.setSizeHint(listWidget.sizeHint())
# insert item at correct position
correctRow = 0
for i in xrange(self.absolute_list.count()):
if frame < self.absolute_list.item(i).data(QtCore.Qt.UserRole):
break
correctRow = i+1
self.absolute_list.insertItem(correctRow, listItem)
self.absolute_list.setItemWidget(listItem, listWidget)
# ---------------------------
# CONNECTIONS
#
def addSelectedToTargetObjects(self):
core.OSR_INSTANCE.addSelectedTargetObject()
for obj in pm.selected():
self.targetObjectsSet.add(obj)
self.refreshObjectList()
#
def removeSelectedFromTargetObjects(self):
core.OSR_INSTANCE.removeSelectedTargetObject()
for obj in pm.selected():
if obj in self.targetObjectsSet:
self.targetObjectsSet.remove(obj)
self.refreshObjectList()
#
def removeTargetObject(self, obj):
try:
core.OSR_INSTANCE.removeTargetObject(obj.fullPath())
except:
core.OSR_INSTANCE.removeTargetObject(obj.nodeName())
self.targetObjectsSet.remove(obj)
self.refreshObjectList()
#
def clearTargetObjects(self):
core.OSR_INSTANCE.clearTargetObjects()
self.targetObjectsSet.clear()
self.refreshObjectList()
#
def toggleRelativeTargetFrame(self):
sender = self.sender()
frame = sender.parent().findChild(QtWidgets.QLabel, 'frame_number').text()
sliderValue = sender.parent().findChild(QtWidgets.QSlider, 'frame_opacity_slider').value()
if sender.isChecked():
core.OSR_INSTANCE.addRelativeTargetFrame(frame, sliderValue)
else:
core.OSR_INSTANCE.removeRelativeTargetFrame(frame)
#
def toggleRelativeKeyframeDisplay(self):
sender = self.sender()
core.OSR_INSTANCE.setRelativeDisplayMode(self.sender().isChecked())
self.saveSettings()
#
def addAbsoluteTargetFrame(self, **kwargs):
frame = kwargs.setdefault('frame', pm.animation.getCurrentTime())
if int(frame) not in self.absoluteFramesSet:
core.OSR_INSTANCE.addAbsoluteTargetFrame(frame, 50)
self.absoluteFramesSet.add(frame)
self.refreshAbsoluteFrameTargetsList()
#
def addAbsoluteTargetFrameFromSpinbox(self):
frame = self.sender().parent().findChild(QtWidgets.QSpinBox, 'absolute_add_spinBox').value()
self.addAbsoluteTargetFrame(frame = frame)
#
def toggleAbsoluteTargetFrame(self):
sender = self.sender()
frame = sender.parent().findChild(QtWidgets.QLabel, 'frame_number').text()
sliderValue = sender.parent().findChild(QtWidgets.QSlider, 'frame_opacity_slider').value()
if sender.isChecked():
core.OSR_INSTANCE.addAbsoluteTargetFrame(frame, sliderValue)
else:
core.OSR_INSTANCE.removeAbsoluteTargetFrame(frame)
#
def removeAbsoluteTargetFrame(self, frame):
core.OSR_INSTANCE.removeAbsoluteTargetFrame(frame)
self.absoluteFramesSet.remove(frame)
self.refreshAbsoluteFrameTargetsList()
#
def clearAbsoluteTargetFrames(self):
core.OSR_INSTANCE.clearAbsoluteTargetFrames()
self.absoluteFramesSet.clear()
self.refreshAbsoluteFrameTargetsList()
#
def clearBuffer(self):
core.OSR_INSTANCE.clearOnionSkinBuffer()
#
def pickColor(self):
color = QtWidgets.QColorDialog.getColor()
if color.isValid():
self.setOnionSkinColor(self.sender(), color.getRgb())
self.saveSettings()
#
def setOpacityForRelativeTargetFrame(self):
opacity = self.sender().value()
frame = self.sender().parent().findChild(QtWidgets.QLabel, 'frame_number').text()
core.OSR_INSTANCE.setOpacityForRelativeTargetFrame(frame, opacity)
#
def setOpacityForAbsoluteTargetFrame(self):
opacity = self.sender().value()
frame = self.sender().parent().findChild(QtWidgets.QLabel, 'frame_number').text()
core.OSR_INSTANCE.setOpacityForAbsoluteTargetFrame(int(frame), opacity)
#
def setTintStrength(self):
core.OSR_INSTANCE.setTintStrength(
self.sender().value()
)
#
def setAutoClearBuffer(self):
value = self.sender().isChecked()
core.OSR_INSTANCE.setAutoClearBuffer(value)
#
def changePrefs(self):
prefUi = PreferencesWindow(self)
if prefUi.exec_():
values = prefUi.getValues()
core.OSR_INSTANCE.setMaxBuffer(values['maxBuffer'])
core.OSR_INSTANCE.setOutlineWidth(values['outlineWidth'])
core.OSR_INSTANCE.setTintSeed(values['tintSeed'])
self.relativeFrameCount = values['relativeKeyCount']*2
self.refreshRelativeFrame()
self.saveSettings()
#
def setRelativeStep(self):
core.OSR_INSTANCE.setRelativeStep(self.sender().value())
self.saveSettings()
    # toggle the active or saved editor between the Onion Skin Renderer and VP2
def toggleRenderer(self):
modelPanelList = []
modelEditorList = pm.lsUI(editors=True)
# find all model panels
for myModelPanel in modelEditorList:
if myModelPanel.find('modelPanel') != -1:
modelPanelList.append(myModelPanel)
onionPanel = None
# if any of those is already set to onion skin renderer
for modelPanel in modelPanelList:
if pm.uitypes.ModelEditor(modelPanel).getRendererOverrideName() == 'onionSkinRenderer':
onionPanel = pm.uitypes.ModelEditor(modelPanel)
break
# if there is a panel with the onion skin renderer
# deactivate it and save the panel
if onionPanel:
try:
# Always better to try in the case of active panel operations
# as the active panel might not be a viewport.
onionPanel.setRendererOverrideName('')
self.activeEditor = onionPanel
except Exception as e:
# Handle exception
print e
else:
# if there is a saved editor panel activate the renderer on it
if self.activeEditor:
self.activeEditor.setRendererOverrideName('onionSkinRenderer')
# else toggle the active one
else:
for modelPanel in modelPanelList:
if pm.uitypes.ModelEditor(modelPanel).getActiveView():
try:
if pm.uitypes.ModelEditor(modelPanel).getRendererOverrideName() == '':
pm.uitypes.ModelEditor(modelPanel).setRendererOverrideName('onionSkinRenderer')
else:
pm.uitypes.ModelEditor(modelPanel).setRendererOverrideName('')
except Exception as e:
# Handle exception
print e
#
def setGlobalOpacity(self):
core.OSR_INSTANCE.setGlobalOpacity(self.sender().value())
#
def setOnionSkinDisplayMode(self):
core.OSR_INSTANCE.setOnionSkinDisplayMode(self.onionType_cBox.currentIndex())
#
def setDrawBehind(self):
core.OSR_INSTANCE.setDrawBehind(self.drawBehind_chkBx.isChecked())
#
def toggleGroupBox(self):
h = self.sender().maximumHeight()
if h > 100000:
self.sender().setMaximumHeight(14)
else:
self.sender().setMaximumHeight(200000)
#
def setTintType(self):
tintType = self.tint_type_cBox.currentIndex()
if tintType == 0:
self.constant_col_widget.setMaximumHeight(16777215)
self.constant_col_widget.setEnabled(True)
else:
self.constant_col_widget.setMaximumHeight(0)
self.constant_col_widget.setEnabled(False)
core.OSR_INSTANCE.setTintType(tintType)
# UTILITY
#
def setOnionSkinColor(self, btn, rgba):
btn.setStyleSheet('background-color: rgb(%s,%s,%s);'%(rgba[0], rgba[1], rgba[2]))
core.OSR_INSTANCE.setTint(rgba, btn.objectName())
#
def loadSettings(self):
with open(os.path.join(self.toolPath,'settings.txt')) as json_file:
self.preferences = json.load(json_file)
self.settings_autoClearBuffer.setChecked(self.preferences.setdefault('autoClearBuffer',True))
core.OSR_INSTANCE.setAutoClearBuffer(self.preferences.setdefault('autoClearBuffer',True))
self.relative_keyframes_chkbx.setChecked(self.preferences.setdefault('displayKeyframes',True))
core.OSR_INSTANCE.setRelativeDisplayMode(self.preferences.setdefault('displayKeyframes',True))
self.setOnionSkinColor(self.relative_futureTint_btn, self.preferences.setdefault('rFutureTint',[0,0,125]))
self.setOnionSkinColor(self.relative_pastTint_btn, self.preferences.setdefault('rPastTint',[0,125,0]))
self.setOnionSkinColor(self.absolute_tint_btn, self.preferences.setdefault('aTint', [125,0,0]))
core.OSR_INSTANCE.setTintSeed(self.preferences.setdefault('tintSeed', 0))
self.tint_type_cBox.setCurrentIndex(self.preferences.setdefault('tintType',0))
self.onionType_cBox.setCurrentIndex(self.preferences.setdefault('onionType',1))
self.drawBehind_chkBx.setChecked(self.preferences.setdefault('drawBehind', True))
self.relativeFrameCount = self.preferences.setdefault('relativeFrameAmount',4)
self.refreshRelativeFrame()
activeRelativeFrames = self.preferences.setdefault('activeRelativeFrames',[])
for child in self.relative_frame.findChildren(OnionListFrame):
if int(child.frame_number.text()) in activeRelativeFrames:
child.frame_visibility_btn.setChecked(True)
self.relative_step_spinBox.setValue(self.preferences.setdefault('relativeStep', 1))
core.OSR_INSTANCE.setMaxBuffer(self.preferences.setdefault('maxBufferSize', 200))
core.OSR_INSTANCE.setOutlineWidth(self.preferences.setdefault('outlineWidth',3))
# save values into a json file
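    # a settings.txt written by this method looks roughly like the following
    # (the values shown here are just an illustration):
    #   {"autoClearBuffer": true, "displayKeyframes": true,
    #    "rFutureTint": [0, 0, 125], "rPastTint": [0, 125, 0], "aTint": [125, 0, 0],
    #    "tintSeed": 0, "tintType": 0, "relativeFrameAmount": 8, "relativeStep": 1,
    #    "maxBufferSize": 200, "outlineWidth": 3, "onionType": 1,
    #    "drawBehind": true, "activeRelativeFrames": [-1, 1]}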
def saveSettings(self):
if DEBUG_ALL: print 'start save'
data = {}
data['autoClearBuffer'] = self.settings_autoClearBuffer.isChecked()
data['displayKeyframes'] = self.relative_keyframes_chkbx.isChecked()
data['rFutureTint'] = self.extractRGBFromStylesheet(self.relative_futureTint_btn.styleSheet())
data['rPastTint'] = self.extractRGBFromStylesheet(self.relative_pastTint_btn.styleSheet())
data['aTint'] = self.extractRGBFromStylesheet(self.absolute_tint_btn.styleSheet())
data['tintSeed'] = core.OSR_INSTANCE.getTintSeed()
data['tintType'] = self.tint_type_cBox.currentIndex()
data['relativeFrameAmount'] = self.relativeFrameCount
data['relativeStep'] = self.relative_step_spinBox.value()
data['maxBufferSize'] = core.OSR_INSTANCE.getMaxBuffer()
data['outlineWidth'] = core.OSR_INSTANCE.getOutlineWidth()
data['onionType'] = self.onionType_cBox.currentIndex()
data['drawBehind'] = self.drawBehind_chkBx.isChecked()
data['activeRelativeFrames'] = self.getActiveRelativeFrameIndices()
with open(os.path.join(self.toolPath,'settings.txt'), 'w') as outfile:
json.dump(data, outfile)
if DEBUG_ALL: print 'end save'
#
def extractRGBFromStylesheet(self, s):
return map(int,(s[s.find("(")+1:s.find(")")]).split(','))
def getActiveRelativeFrameIndices(self):
activeFrames = []
# clear the frame of all widgets first
for child in self.relative_frame.findChildren(OnionListFrame):
if child.frame_visibility_btn.isChecked():
activeFrames.append(int(child.frame_number.text()))
return activeFrames
'''
FRAME WIDGET
the widget for displaying a frame in a list. includes visibility, opacity slider
and on demand a remove button
'''
class OnionListFrame(QtWidgets.QWidget, wdgt_Frame.Ui_onionSkinFrame_layout):
def __init__(self, parent = getMayaMainWindow()):
super(OnionListFrame, self).__init__(parent)
self.setupUi(self)
def addRemoveButton(self):
self.frame_remove_btn = QtWidgets.QPushButton('rm')
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_remove_btn.sizePolicy().hasHeightForWidth())
self.frame_remove_btn.setSizePolicy(sizePolicy)
self.frame_remove_btn.setMinimumSize(QtCore.QSize(16, 16))
self.frame_remove_btn.setMaximumSize(QtCore.QSize(16, 16))
self.frame_widget_layout.addWidget(self.frame_remove_btn)
'''
OBJECT WIDGET
the widget for displaying an object in a list
'''
class TargetObjectListWidget(QtWidgets.QWidget, wdgt_MeshListObj.Ui_onionSkinObject_layout):
def __init__(self, parent = getMayaMainWindow()):
super(TargetObjectListWidget, self).__init__(parent)
self.setupUi(self)
'''
Settings Dialog
in this window the user can set some preferences
'''
class PreferencesWindow(QtWidgets.QDialog, wdgt_Preferences.Ui_onionSkinRendererPreferences):
def __init__(self, parent):
super(PreferencesWindow, self).__init__(parent)
self.setupUi(self)
self.relativeKeyCount_spinBox.setValue(parent.relativeFrameCount/2)
self.maxBuffer_spinBox.setValue(core.OSR_INSTANCE.getMaxBuffer())
self.outlineWidth_spinBox.setValue(core.OSR_INSTANCE.getOutlineWidth())
self.tintSeed_spinBox.setValue(core.OSR_INSTANCE.getTintSeed())
def getValues(self):
values = {}
values['maxBuffer'] = self.maxBuffer_spinBox.value()
values['relativeKeyCount'] = self.relativeKeyCount_spinBox.value()
values['outlineWidth'] = self.outlineWidth_spinBox.value()
values['tintSeed'] = self.tintSeed_spinBox.value()
return values | mit | 7,300,354,918,632,017,000 | 39.098497 | 124 | 0.666042 | false |
longman694/youtube-dl | youtube_dl/extractor/xhamster.py | 15 | 8650 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
clean_html,
dict_get,
ExtractorError,
int_or_none,
parse_duration,
unified_strdate,
)
class XHamsterIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:.+?\.)?xhamster\.com/
(?:
movies/(?P<id>\d+)/(?P<display_id>[^/]*)\.html|
videos/(?P<display_id_2>[^/]*)-(?P<id_2>\d+)
)
'''
_TESTS = [{
'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
'md5': '8281348b8d3c53d39fffb377d24eac4e',
'info_dict': {
'id': '1509445',
'display_id': 'femaleagent_shy_beauty_takes_the_bait',
'ext': 'mp4',
'title': 'FemaleAgent Shy beauty takes the bait',
'upload_date': '20121014',
'uploader': 'Ruseful2011',
'duration': 893,
'age_limit': 18,
'categories': ['Fake Hub', 'Amateur', 'MILFs', 'POV', 'Boss', 'Office', 'Oral', 'Reality', 'Sexy'],
},
}, {
'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
'info_dict': {
'id': '2221348',
'display_id': 'britney_spears_sexy_booty',
'ext': 'mp4',
'title': 'Britney Spears Sexy Booty',
'upload_date': '20130914',
'uploader': 'jojo747400',
'duration': 200,
'age_limit': 18,
'categories': ['Britney Spears', 'Celebrities', 'HD Videos', 'Sexy', 'Sexy Booty'],
},
'params': {
'skip_download': True,
},
}, {
# empty seo
'url': 'http://xhamster.com/movies/5667973/.html',
'info_dict': {
'id': '5667973',
'ext': 'mp4',
'title': '....',
'upload_date': '20160208',
'uploader': 'parejafree',
'duration': 72,
'age_limit': 18,
'categories': ['Amateur', 'Blowjobs'],
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://xhamster.com/movies/2272726/amber_slayed_by_the_knight.html',
'only_matching': True,
}, {
# This video is visible for marcoalfa123456's friends only
'url': 'https://it.xhamster.com/movies/7263980/la_mia_vicina.html',
'only_matching': True,
}, {
# new URL schema
'url': 'https://pt.xhamster.com/videos/euro-pedal-pumping-7937821',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') or mobj.group('id_2')
display_id = mobj.group('display_id') or mobj.group('display_id_2')
webpage = self._download_webpage(url, video_id)
error = self._html_search_regex(
r'<div[^>]+id=["\']videoClosed["\'][^>]*>(.+?)</div>',
webpage, 'error', default=None)
if error:
raise ExtractorError(error, expected=True)
title = self._html_search_regex(
[r'<h1[^>]*>([^<]+)</h1>',
r'<meta[^>]+itemprop=".*?caption.*?"[^>]+content="(.+?)"',
r'<title[^>]*>(.+?)(?:,\s*[^,]*?\s*Porn\s*[^,]*?:\s*xHamster[^<]*| - xHamster\.com)</title>'],
webpage, 'title')
formats = []
format_urls = set()
sources = self._parse_json(
self._search_regex(
r'sources\s*:\s*({.+?})\s*,?\s*\n', webpage, 'sources',
default='{}'),
video_id, fatal=False)
for format_id, format_url in sources.items():
if not isinstance(format_url, compat_str):
continue
if format_url in format_urls:
continue
format_urls.add(format_url)
formats.append({
'format_id': format_id,
'url': format_url,
'height': int_or_none(self._search_regex(
r'^(\d+)[pP]', format_id, 'height', default=None))
})
video_url = self._search_regex(
[r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''],
webpage, 'video url', group='mp4', default=None)
if video_url and video_url not in format_urls:
formats.append({
'url': video_url,
})
self._sort_formats(formats)
        # Only a few videos have a description
mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
description = mobj.group(1) if mobj else None
upload_date = unified_strdate(self._search_regex(
r'hint=["\'](\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}',
webpage, 'upload date', fatal=False))
uploader = self._html_search_regex(
r'<span[^>]+itemprop=["\']author[^>]+><a[^>]+><span[^>]+>([^<]+)',
webpage, 'uploader', default='anonymous')
thumbnail = self._search_regex(
[r'''thumb\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''',
r'''<video[^>]+poster=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
webpage, 'thumbnail', fatal=False, group='thumbnail')
duration = parse_duration(self._search_regex(
[r'<[^<]+\bitemprop=["\']duration["\'][^<]+\bcontent=["\'](.+?)["\']',
r'Runtime:\s*</span>\s*([\d:]+)'], webpage,
'duration', fatal=False))
view_count = int_or_none(self._search_regex(
r'content=["\']User(?:View|Play)s:(\d+)',
webpage, 'view count', fatal=False))
mobj = re.search(r'hint=[\'"](?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes', webpage)
(like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None)
mobj = re.search(r'</label>Comments \((?P<commentcount>\d+)\)</div>', webpage)
comment_count = mobj.group('commentcount') if mobj else 0
age_limit = self._rta_search(webpage)
categories_html = self._search_regex(
r'(?s)<table.+?(<span>Categories:.+?)</table>', webpage,
'categories', default=None)
categories = [clean_html(category) for category in re.findall(
r'<a[^>]+>(.+?)</a>', categories_html)] if categories_html else None
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'upload_date': upload_date,
'uploader': uploader,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'like_count': int_or_none(like_count),
'dislike_count': int_or_none(dislike_count),
'comment_count': int_or_none(comment_count),
'age_limit': age_limit,
'categories': categories,
'formats': formats,
}
class XHamsterEmbedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?xhamster\.com/xembed\.php\?video=(?P<id>\d+)'
_TEST = {
'url': 'http://xhamster.com/xembed.php?video=3328539',
'info_dict': {
'id': '3328539',
'ext': 'mp4',
'title': 'Pen Masturbation',
'upload_date': '20140728',
'uploader_id': 'anonymous',
'duration': 5,
'age_limit': 18,
}
}
@staticmethod
def _extract_urls(webpage):
return [url for _, url in re.findall(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?xhamster\.com/xembed\.php\?video=\d+)\1',
webpage)]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r'href="(https?://xhamster\.com/movies/%s/[^"]*\.html[^"]*)"' % video_id,
webpage, 'xhamster url', default=None)
if not video_url:
vars = self._parse_json(
self._search_regex(r'vars\s*:\s*({.+?})\s*,\s*\n', webpage, 'vars'),
video_id)
video_url = dict_get(vars, ('downloadLink', 'homepageLink', 'commentsLink', 'shareUrl'))
return self.url_result(video_url, 'XHamster')
| unlicense | -3,816,557,424,709,412,400 | 36.124464 | 117 | 0.485549 | false |
havardgulldahl/pling-plong-odometer | test/test_xmeml.py | 1 | 1545 | # encoding: utf-8
# tests of xmeml logic
from distutils import dir_util
from pytest import fixture
import os
import sys
import pytest
from xmeml import iter as xmemliter
## THANK YOU http://stackoverflow.com/a/29631801
@fixture
def datadir(tmpdir, request):
'''
Fixture responsible for searching a folder with the same name of test
module and, if available, moving all contents to a temporary directory so
tests can use them freely.
'''
filename = request.module.__file__
test_dir, _ = os.path.splitext(filename)
if os.path.isdir(test_dir):
dir_util.copy_tree(test_dir, bytes(tmpdir))
return tmpdir
def _fsdecode(b):
try:
return b.decode(sys.getfilesystemencoding())
except UnicodeDecodeError:
pass
try:
return b.decode("iso-8859-15")
except UnicodeEncodeError:
return b.decode("utf-8", "replace")
def test_xmemlsamples(datadir):
def load(f):
print("test load {f!r}".format(f=_fsdecode(f.basename)))
xmeml = xmemliter.XmemlParser(f.open())
audioclips, audiofiles = xmeml.audibleranges()
return xmeml
#print datadir.listdir()
for xml in datadir.listdir(fil=lambda x: _fsdecode(x.basename).upper().endswith(".XML")):
_xmlbasename = _fsdecode(xml.basename)
if _xmlbasename == 'EMPTY_SEQUENCE.xml':
# empty sequence test, expect error raised
with pytest.raises(xmemliter.XmemlFileError):
load(xml)
continue
assert load(xml)
| gpl-3.0 | 834,200,219,906,065,400 | 25.637931 | 93 | 0.654369 | false |
buffer/phoneyc | ActiveX/ActiveX.py | 3 | 5040 |
import os
import config
from binascii import hexlify
from CLSID import clsidlist, clsnamelist
from Attr2Fun import Attr2Fun
eventlist = []
funname = []
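# unknownObject is a recording proxy: every attribute get/set and every call on
# a mocked ActiveX object is logged through add_event(), and suspicious values
# (very long strings, URLs, local file paths) trigger add_alert() warnings for
# later analysis via write_log().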
class unknownObject(object):
def __init__(self, name, parent = None):
self.__dict__['__name'] = name
if not parent:
self.__dict__['tagName'] = 'object'
else:
self.__dict__['__parent'] = parent
def __getattr__(self, name):
funname.append(name)
add_event(self, 'get', name)
self.__dict__[name] = unknownObject(name, self)
return self.__dict__[name]
def __call__(self, *arg):
if len(str(arg)) >= 50:
add_alert('Warning: argument of function ' + funname[-1] + ' length = ' + str(len(str(arg))))
elif str(arg).lower().find('http://') != -1:
add_alert('Warning: argument of function ' + funname[-1] + ' contains url: ' + str(arg))
elif str(arg).lower().find('c:\\') != -1:
add_alert('Warning: argument of function ' + funname[-1] + ' contains filename: ' +str(arg))
add_event(self, 'call', arg)
if '__argument' not in self.__dict__:
self.__dict__['__argument'] = []
returnval = unknownObject('__argument', self)
returnval.__dict__['__argument'] = arg
return returnval
def __setattr__(self, name, val):
add_event(self, 'set', name, val)
if len(str([val])) >= 50:
add_alert('Warning: attribute ' + name + ' length = ' + str(len(str([val]))))
elif str([val]).lower().find('http://') != -1:
add_alert('Warning: attribute ' + name + ' contains url: ' + str([val]))
elif str([val]).lower().find('c:\\') != -1:
add_alert('Warning: attribute ' + name + ' contains file name: ' + str([val]))
self.__dict__[name] = val
def __noSuchMethod__(self, name, *arg):
add_event(self, 'call', arg)
for i in arg:
if isinstance(i, str) and len(i)>=50:
add_alert('Warning: argument of function '+name+' length = '+str(len(i)))
        return unknownObject(name, self)
class ActiveXObject(unknownObject):
def __init__(self, cls, clstype = 'name'):
config.VERBOSE(config.VERBOSE_WARNING, "[WARNING] New ActiveX Object: " + cls)
unknownObject.__init__(self, cls)
filename = ''
if clstype == 'id':
if len(cls) >= 6 and (cls[0:6] == 'clsid:' or cls[0:6] == 'CLSID:'):
cls = cls[6:].upper()
if cls in clsidlist.keys():
filename = clsidlist[cls]
else:
if cls in clsnamelist:
filename = clsnamelist[cls]
self.__dict__['__name'] = filename
# config.VERBOSE(config.VERBOSE_WARNING, config.universal_activex)
if not config.universal_activex:
self.check_raise_warning(filename, cls)
if filename:
exec load_src(filename)
def is_enabled_mock_activex(self):
return int(os.environ['PHONEYC_MOCK_ACTIVEX'])
def raise_warning(self, cls):
print "[WARNING] Unknown ActiveX Object: %s" % (cls, )
raise UserWarning
def check_raise_warning(self, filename, cls):
if not filename:
self.raise_warning(cls)
module = "ActiveX/modules/%s" % (filename, )
if not self.is_enabled_mock_activex() and not os.access(module, os.F_OK):
self.raise_warning(cls)
def __setattr__(self, name, val):
key = self.__dict__['__name'] + name
if key in Attr2Fun.keys():
Attr2Fun[name](val)
else:
unknownObject.__setattr__(self, name, val)
def load_src(filename):
module = "ActiveX/modules/%s" % (filename, )
script = ''
try:
fd = open(module, 'r')
script = fd.read()
fd.close()
except IOError:
pass
finally:
return script
def add_alert(alert):
config.VERBOSE(config.VERBOSE_DEFAULT, '[ALERT] ' + alert)
def add_event(target, evttype, *arg):
invstack = [target]
while '__parent' in target.__dict__:
target = target.__dict__['__parent']
invstack.append(target)
eventlog = 'ActiveXObject'
for obj in invstack:
if obj.__dict__['__name'] == '__argument':
eventlog += str(obj.__dict__['__argument'])
else:
eventlog += '.' + obj.__dict__['__name']
if evttype == 'get':
eventlog += '.' + arg[0]
if evttype == 'set':
eventlog += '.' + arg[0] + '=' + str(arg[1])
if evttype == 'call':
eventlog += str(arg[0])
eventlist.append(eventlog)
def write_log(filename):
if not eventlist:
config.VERBOSE(config.VERBOSE_DEBUG, '[DEBUG] in ActiveX.py: No ActiveXObject found.')
return
try:
fd = open('log/' + filename, 'wb')
for log in eventlist:
fd.write(log + '\n')
fd.close()
print 'Log written into: log/' + filename
except IOError:
pass
| gpl-2.0 | -2,631,485,755,720,054,000 | 32.377483 | 105 | 0.538492 | false |
tharp/linux-minidisc | netmd/libnetmd.py | 4 | 44422 | import libusb1
from cStringIO import StringIO
from time import sleep
from struct import pack
try:
from Crypto.Cipher import DES
from Crypto.Cipher import DES3
except ImportError:
DES = None
DES3 = None
import array
import random
def dump(data):
if isinstance(data, basestring):
result = ' '.join(['%02x' % (ord(x), ) for x in data])
else:
result = repr(data)
return result
class defaultUploadEvents:
def progress(self, current):
print 'Done: %x/%x (%.02f%%)' % (current, self.total,
current/float(self.total) * 100)
def trackinfo(self, frames, bytes, format):
self.total = bytes;
KNOWN_USB_ID_SET = frozenset([
(0x04dd, 0x7202), # Sharp IM-MT899H
(0x054c, 0x0075), # Sony MZ-N1
(0x054c, 0x0080), # Sony LAM-1
(0x054c, 0x0081), # Sony MDS-JB980
(0x054c, 0x0084), # Sony MZ-N505
(0x054c, 0x0085), # Sony MZ-S1
(0x054c, 0x0086), # Sony MZ-N707
(0x054c, 0x00c6), # Sony MZ-N10
(0x054c, 0x00c7), # Sony MZ-N910
(0x054c, 0x00c8), # Sony MZ-N710/NF810
(0x054c, 0x00c9), # Sony MZ-N510/N610
(0x054c, 0x00ca), # Sony MZ-NE410/NF520D
(0x054c, 0x00eb), # Sony MZ-NE810/NE910
(0x054c, 0x0101), # Sony LAM-10
(0x054c, 0x0113), # Aiwa AM-NX1
(0x054c, 0x014c), # Aiwa AM-NX9
(0x054c, 0x017e), # Sony MZ-NH1
(0x054c, 0x0180), # Sony MZ-NH3D
(0x054c, 0x0182), # Sony MZ-NH900
(0x054c, 0x0184), # Sony MZ-NH700/NH800
(0x054c, 0x0186), # Sony MZ-NH600/NH600D
(0x054c, 0x0188), # Sony MZ-N920
(0x054c, 0x018a), # Sony LAM-3
(0x054c, 0x01e9), # Sony MZ-DH10P
(0x054c, 0x0219), # Sony MZ-RH10
(0x054c, 0x021b), # Sony MZ-RH710/MZ-RH910
(0x054c, 0x022c), # Sony CMT-AH10 (stereo set with integrated MD)
(0x054c, 0x023c), # Sony DS-HMD1 (device without analog music rec/playback)
(0x054c, 0x0286), # Sony MZ-RH1
])
def iterdevices(usb_context, bus=None, device_address=None):
"""
Iterator for plugged-in NetMD devices.
Parameters:
usb_context (usb1.LibUSBContext)
Some usb1.LibUSBContext instance.
bus (None, int)
Only scan this bus.
device_address (None, int)
Only scan devices at this address on each scanned bus.
Returns (yields) NetMD instances.
"""
for device in usb_context.getDeviceList():
if bus is not None and bus != device.getBusNumber():
continue
if device_address is not None and \
device_address != device.getDeviceAddress():
continue
if (device.getVendorID(), device.getProductID()) in KNOWN_USB_ID_SET:
yield NetMD(device.open())
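# Typical usage sketch (device-specific results will vary):
#   import usb1
#   context = usb1.LibUSBContext()
#   for net_md in iterdevices(context):
#       interface = NetMDInterface(net_md)
#       print interface.getStatus()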
# XXX: Endpoint numbers are hardcoded
BULK_WRITE_ENDPOINT = 0x02
BULK_READ_ENDPOINT = 0x81
# NetMD Protocol return status (first byte of request)
STATUS_CONTROL = 0x00
STATUS_STATUS = 0x01
STATUS_SPECIFIC_INQUIRY = 0x02
STATUS_NOTIFY = 0x03
STATUS_GENERAL_INQUIRY = 0x04
# ... (first byte of response)
STATUS_NOT_IMPLEMENTED = 0x08
STATUS_ACCEPTED = 0x09
STATUS_REJECTED = 0x0a
STATUS_IN_TRANSITION = 0x0b
STATUS_IMPLEMENTED = 0x0c
STATUS_CHANGED = 0x0d
STATUS_INTERIM = 0x0f
class NetMDException(Exception):
"""
Base exception for all NetMD exceptions.
"""
pass
class NetMDNotImplemented(NetMDException):
"""
NetMD protocol "operation not implemented" exception.
"""
pass
class NetMDRejected(NetMDException):
"""
NetMD protocol "operation rejected" exception.
"""
pass
class NetMD(object):
"""
Low-level interface for a NetMD device.
"""
def __init__(self, usb_handle, interface=0):
"""
usb_handle (usb1.USBDeviceHandle)
USB device corresponding to a NetMD player.
interface (int)
USB interface implementing NetMD protocol on the USB device.
"""
self.usb_handle = usb_handle
self.interface = interface
usb_handle.setConfiguration(1)
usb_handle.claimInterface(interface)
if self._getReplyLength() != 0:
self.readReply()
def __del__(self):
try:
self.usb_handle.resetDevice()
self.usb_handle.releaseInterface(self.interface)
except: # Should specify an usb exception
pass
def _getReplyLength(self):
reply = self.usb_handle.controlRead(libusb1.LIBUSB_TYPE_VENDOR | \
libusb1.LIBUSB_RECIPIENT_INTERFACE,
0x01, 0, 0, 4)
return ord(reply[2])
def sendCommand(self, command):
"""
Send a raw binary command to device.
command (str)
Binary command to send.
"""
#print '%04i> %s' % (len(command), dump(command))
self.usb_handle.controlWrite(libusb1.LIBUSB_TYPE_VENDOR | \
libusb1.LIBUSB_RECIPIENT_INTERFACE,
0x80, 0, 0, command)
def readReply(self):
"""
Get a raw binary reply from device.
Returns the reply.
"""
reply_length = 0
while reply_length == 0:
reply_length = self._getReplyLength()
if reply_length == 0: sleep(0.1)
reply = self.usb_handle.controlRead(libusb1.LIBUSB_TYPE_VENDOR | \
libusb1.LIBUSB_RECIPIENT_INTERFACE,
0x81, 0, 0, reply_length)
#print '%04i< %s' % (len(reply), dump(reply))
return reply
def readBulk(self, length):
"""
Read bulk data from device.
length (int)
Length of data to read.
Returns data read.
"""
result = StringIO()
self.readBulkToFile(length, result)
return result.getvalue()
def readBulkToFile(self, length, outfile, chunk_size=0x10000, callback=lambda(a):None):
"""
Read bulk data from device, and write it to a file.
length (int)
Length of data to read.
outfile (str)
Path to output file.
chunk_size (int)
Keep this much data in memory before flushing it to file.
"""
done = 0
while done < length:
received = self.usb_handle.bulkRead(BULK_READ_ENDPOINT,
min((length - done), chunk_size))
done += len(received)
outfile.write(received)
callback(done)
def writeBulk(self, data):
"""
Write data to device.
data (str)
Data to write.
"""
self.usb_handle.bulkWrite(BULK_WRITE_ENDPOINT, data)
ACTION_PLAY = 0x75
ACTION_PAUSE = 0x7d
ACTION_FASTFORWARD = 0x39
ACTION_REWIND = 0x49
TRACK_PREVIOUS = 0x0002
TRACK_NEXT = 0x8001
TRACK_RESTART = 0x0001
ENCODING_SP = 0x90
ENCODING_LP2 = 0x92
ENCODING_LP4 = 0x93
CHANNELS_MONO = 0x01
CHANNELS_STEREO = 0x00
CHANNEL_COUNT_DICT = {
CHANNELS_MONO: 1,
CHANNELS_STEREO: 2
}
OPERATING_STATUS_USB_RECORDING = 0x56ff
OPERATING_STATUS_RECORDING = 0xc275
OPERATING_STATUS_RECORDING_PAUSED = 0xc27d
OPERATING_STATUS_FAST_FORWARDING = 0xc33f
OPERATING_STATUS_REWINDING = 0xc34f
OPERATING_STATUS_PLAYING = 0xc375
OPERATING_STATUS_PAUSED = 0xc37d
OPERATING_STATUS_STOPPED = 0xc5ff
TRACK_FLAG_PROTECTED = 0x03
DISC_FLAG_WRITABLE = 0x10
DISC_FLAG_WRITE_PROTECTED = 0x40
DISKFORMAT_LP4 = 0
DISKFORMAT_LP2 = 2
DISKFORMAT_SP_MONO = 4
DISKFORMAT_SP_STEREO = 6
WIREFORMAT_PCM = 0
WIREFORMAT_105KBPS = 0x90
WIREFORMAT_LP2 = 0x94
WIREFORMAT_LP4 = 0xA8
_FORMAT_TYPE_LEN_DICT = {
'b': 1, # byte
'w': 2, # word
'd': 4, # doubleword
'q': 8, # quadword
}
def BCD2int(bcd):
"""
Convert BCD number of an arbitrary length to an int.
bcd (int)
bcd number
Returns the same number as an int.
"""
value = 0
nibble = 0
while bcd:
nibble_value = bcd & 0xf
bcd >>= 4
value += nibble_value * (10 ** nibble)
nibble += 1
return value
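# e.g. BCD2int(0x80) == 80 and BCD2int(0x1234) == 1234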
def int2BCD(value, length=1):
"""
Convert an int into a BCD number.
value (int)
Integer value.
length (int)
Length limit for output number, in bytes.
Returns the same value in BCD.
"""
    if value > 10 ** (length * 2) - 1:
raise ValueError('Value %r cannot fit in %i bytes in BCD' %
(value, length))
bcd = 0
nibble = 0
while value:
value, nibble_value = divmod(value, 10)
bcd |= nibble_value << (4 * nibble)
nibble += 1
return bcd
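# e.g. int2BCD(9) == 0x09 and int2BCD(98, length=2) == 0x98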
class NetMDInterface(object):
"""
High-level interface for a NetMD device.
Notes:
Track numbering starts at 0.
First song position is 0:0:0'1 (0 hours, 0 minutes, 0 second, 1 sample)
wchar titles are probably shift-jis encoded (hint only, nothing relies
on this in this file)
"""
def __init__(self, net_md):
"""
net_md (NetMD)
Interface to the NetMD device to use.
"""
self.net_md = net_md
def send_query(self, query, test=False):
# XXX: to be removed (replaced by 2 separate calls)
self.sendCommand(query, test=test)
return self.readReply()
def sendCommand(self, query, test=False):
if test:
query = [STATUS_SPECIFIC_INQUIRY, ] + query
else:
query = [STATUS_CONTROL, ] + query
binquery = ''.join(chr(x) for x in query)
self.net_md.sendCommand(binquery)
def readReply(self):
result = self.net_md.readReply()
status = ord(result[0])
if status == STATUS_NOT_IMPLEMENTED:
raise NetMDNotImplemented('Not implemented')
elif status == STATUS_REJECTED:
raise NetMDRejected('Rejected')
elif status not in (STATUS_ACCEPTED, STATUS_IMPLEMENTED,
STATUS_INTERIM):
raise NotImplementedError('Unknown returned status: %02X' %
(status, ))
return result[1:]
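    # formatQuery/scanQuery share a small printf-like format language (inferred
    # from the code below): pairs of hex digits are literal bytes, spaces are
    # ignored, and '%' escapes introduce placeholders -- %b/%w/%d/%q for
    # big-endian 1/2/4/8 byte integers, %s for a string with a 2-byte length
    # prefix and a terminating 0, %x for a string with a 2-byte length prefix
    # only, %* for the remaining raw bytes, and (scanQuery only) %? to skip a
    # single byte.
    # For example, formatQuery('18c3 ff %b 000000', 0x75) yields
    # [0x18, 0xc3, 0xff, 0x75, 0x00, 0x00, 0x00].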
def formatQuery(self, format, *args):
result = []
append = result.append
extend = result.extend
half = None
def hexAppend(value):
append(int(value, 16))
escaped = False
arg_stack = list(args)
for char in format:
if escaped:
escaped = False
value = arg_stack.pop(0)
if char in _FORMAT_TYPE_LEN_DICT:
for byte in xrange(_FORMAT_TYPE_LEN_DICT[char] - 1, -1, -1):
append((value >> (byte * 8)) & 0xff)
# String ('s' is 0-terminated, 'x' is not)
elif char in ('s', 'x'):
length = len(value)
if char == 's':
length += 1
append((length >> 8) & 0xff)
append(length & 0xff)
extend(ord(x) for x in value)
if char == 's':
append(0)
elif char == '*':
extend(ord(x) for x in value)
else:
raise ValueError('Unrecognised format char: %r' % (char, ))
continue
if char == '%':
assert half is None
escaped = True
continue
if char == ' ':
continue
if half is None:
half = char
else:
hexAppend(half + char)
half = None
assert len(arg_stack) == 0
return result
def scanQuery(self, query, format):
result = []
append = result.append
half = None
escaped = False
input_stack = list(query)
def pop():
return ord(input_stack.pop(0))
for char in format:
if escaped:
escaped = False
if char == '?':
pop()
continue
if char in _FORMAT_TYPE_LEN_DICT:
value = 0
for byte in xrange(_FORMAT_TYPE_LEN_DICT[char] - 1, -1, -1):
value |= (pop() << (byte * 8))
append(value)
# String ('s' is 0-terminated, 'x' is not)
elif char in ('s', 'x'):
length = pop() << 8 | pop()
value = ''.join(input_stack[:length])
input_stack = input_stack[length:]
if char == 's':
append(value[:-1])
else:
append(value)
# Fetch the remainder of the query in one value
elif char == '*':
value = ''.join(input_stack)
input_stack = []
append(value)
else:
raise ValueError('Unrecognised format char: %r' % (char, ))
continue
if char == '%':
assert half is None
escaped = True
continue
if char == ' ':
continue
if half is None:
half = char
else:
input_value = pop()
format_value = int(half + char, 16)
if format_value != input_value:
raise ValueError('Format and input mismatch at %i: '
'expected %02x, got %02x' % (
len(query) - len(input_stack) - 1,
format_value, input_value))
half = None
assert len(input_stack) == 0
return result
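    # Illustration of the query format language shared by formatQuery and
    # scanQuery: pairs of hex digits are literal bytes, spaces are ignored,
    # and '%' escapes handle values:
    #   %b / %w / %d / %q -- 1/2/4/8-byte big-endian integer
    #   %s                -- length-prefixed, 0-terminated string ('%x': no 0)
    #   %?                -- skip one byte (scanQuery only)
    #   %*                -- the remainder of the data as a raw string
    # For example, formatQuery('18c3 ff %b 000000', ACTION_PLAY) builds the
    # "play" command, and scanQuery(reply, '18c3 00 %b 000000') verifies the
    # literal bytes and returns [action] from the reply.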
def acquire(self):
"""
        Acquire exclusive access to the device.
        XXX: exact meaning is unknown.
"""
query = self.formatQuery('ff 010c ffff ffff ffff ffff ffff ffff')
reply = self.send_query(query)
self.scanQuery(reply, 'ff 010c ffff ffff ffff ffff ffff ffff')
def release(self):
"""
Release device previously acquired for exclusive access.
        XXX: exact meaning is unknown.
"""
query = self.formatQuery('ff 0100 ffff ffff ffff ffff ffff ffff')
reply = self.send_query(query)
self.scanQuery(reply, 'ff 0100 ffff ffff ffff ffff ffff ffff')
def getStatus(self):
"""
Get device status.
Returns device response (content meaning is largely unknown).
"""
query = self.formatQuery('1809 8001 0230 8800 0030 8804 00 ff00 ' \
'00000000')
reply = self.send_query(query)
return self.scanQuery(reply, '1809 8001 0230 8800 0030 8804 00 ' \
                               '1000 00090000 %x')[0]
def isDiskPresent(self):
"""
        Is a disk present in the device?
Returns a boolean:
True: disk present
False: no disk
"""
status = self.getStatus()
return status[4] == 0x40
def getOperatingStatus(self):
query = self.formatQuery('1809 8001 0330 8802 0030 8805 0030 8806 ' \
'00 ff00 00000000')
reply = self.send_query(query)
return self.scanQuery(reply, '1809 8001 0330 8802 0030 8805 0030 ' \
'8806 00 1000 00%?0000 0006 8806 0002 %w')[0]
def _getPlaybackStatus(self, p1, p2):
query = self.formatQuery('1809 8001 0330 %w 0030 8805 0030 %w 00 ' \
'ff00 00000000',
p1, p2)
reply = self.send_query(query)
return self.scanQuery(reply, '1809 8001 0330 %?%? %?%? %?%? %?%? ' \
'%?%? %? 1000 00%?0000 %x')[0]
def getPlaybackStatus1(self):
return self._getPlaybackStatus(0x8801, 0x8807)
def getPlaybackStatus2(self):
# XXX: duplicate of getOperatingStatus
return self._getPlaybackStatus(0x8802, 0x8806)
def getPosition(self):
query = self.formatQuery('1809 8001 0430 8802 0030 8805 0030 0003 ' \
'0030 0002 00 ff00 00000000')
try:
reply = self.send_query(query)
except NetMDRejected: # No disc
result = None
else:
result = self.scanQuery(reply, '1809 8001 0430 %?%? %?%? %?%? ' \
'%?%? %?%? %?%? %?%? %? %?00 00%?0000 ' \
'000b 0002 0007 00 %w %b %b %b %b')
result[1] = BCD2int(result[1])
result[2] = BCD2int(result[2])
result[3] = BCD2int(result[3])
result[4] = BCD2int(result[4])
return result
def _play(self, action):
query = self.formatQuery('18c3 ff %b 000000', action)
reply = self.send_query(query)
self.scanQuery(reply, '18c3 00 %b 000000')
def play(self):
"""
Start playback on device.
"""
self._play(ACTION_PLAY)
def fast_forward(self):
"""
Fast-forward device.
"""
self._play(ACTION_FASTFORWARD)
def rewind(self):
"""
Rewind device.
"""
self._play(ACTION_REWIND)
def pause(self):
"""
Pause device.
"""
self._play(ACTION_PAUSE)
def stop(self):
"""
Stop playback on device.
"""
query = self.formatQuery('18c5 ff 00000000')
reply = self.send_query(query)
self.scanQuery(reply, '18c5 00 00000000')
def gotoTrack(self, track):
"""
        Seek to the beginning of the given track number on the device.
"""
query = self.formatQuery('1850 ff010000 0000 %w', track)
reply = self.send_query(query)
return self.scanQuery(reply, '1850 00010000 0000 %w')[0]
def gotoTime(self, track, hour=0, minute=0, second=0, frame=0):
"""
Seek to given time of given track.
"""
query = self.formatQuery('1850 ff000000 0000 %w %b%b%b%b', track,
int2BCD(hour), int2BCD(minute),
int2BCD(second), int2BCD(frame))
reply = self.send_query(query)
return self.scanQuery(reply, '1850 00000000 %?%? %w %b%b%b%b')
def _trackChange(self, direction):
query = self.formatQuery('1850 ff10 00000000 %w', direction)
reply = self.send_query(query)
return self.scanQuery(reply, '1850 0010 00000000 %?%?')
def nextTrack(self):
"""
        Go to the beginning of the next track.
"""
self._trackChange(TRACK_NEXT)
def previousTrack(self):
"""
        Go to the beginning of the previous track.
"""
self._trackChange(TRACK_PREVIOUS)
def restartTrack(self):
"""
        Go to the beginning of the current track.
"""
self._trackChange(TRACK_RESTART)
def eraseDisc(self):
"""
Erase disc.
        This is reported not to check for any track protection, and
        unconditionally erases everything.
"""
# XXX: test to see if it honors read-only disc mode.
query = self.formatQuery('1840 ff 0000')
reply = self.send_query(query)
self.scanQuery(reply, '1840 00 0000')
def syncTOC(self):
query = self.formatQuery('1808 10180200 00')
reply = self.send_query(query)
return self.scanQuery(reply, '1808 10180200 00')
def cacheTOC(self):
query = self.formatQuery('1808 10180203 00')
reply = self.send_query(query)
return self.scanQuery(reply, '1808 10180203 00')
def getDiscFlags(self):
"""
Get disc flags.
Returns a bitfield (see DISC_FLAG_* constants).
"""
query = self.formatQuery('1806 01101000 ff00 0001000b')
reply = self.send_query(query)
return self.scanQuery(reply, '1806 01101000 1000 0001000b %b')[0]
def getTrackCount(self):
"""
Get the number of disc tracks.
"""
query = self.formatQuery('1806 02101001 3000 1000 ff00 00000000')
reply = self.send_query(query)
data = self.scanQuery(reply, '1806 02101001 %?%? %?%? 1000 00%?0000 ' \
'%x')[0]
assert len(data) == 6, len(data)
assert data[:5] == '\x00\x10\x00\x02\x00', data[:5]
return ord(data[5])
def _getDiscTitle(self, wchar=False):
# XXX: long title support untested.
if wchar:
wchar_value = 1
else:
wchar_value = 0
done = 0
remaining = 0
total = 1
result = []
while done < total:
query = self.formatQuery('1806 02201801 00%b 3000 0a00 ff00 %w%w',
wchar_value, remaining, done)
reply = self.send_query(query)
if remaining == 0:
chunk_size, total, chunk = self.scanQuery(reply,
'1806 02201801 00%? 3000 0a00 1000 %w0000 %?%?000a %w %*')
chunk_size -= 6
else:
chunk_size, chunk = self.scanQuery(reply,
'1806 02201801 00%? 3000 0a00 1000 %w%?%? %*')
assert chunk_size == len(chunk)
result.append(chunk)
done += chunk_size
remaining = total - done
#if not wchar and len(result):
# assert result[-1] == '\x00'
# result = result[:-1]
return ''.join(result)
def getDiscTitle(self, wchar=False):
"""
Return disc title.
wchar (bool)
If True, return the content of wchar title.
If False, return the ASCII title.
"""
title = self._getDiscTitle(wchar=wchar)
if title.endswith('//'):
# this is a grouped minidisc which may have a disc title
# The disc title is always stored in the first entry and
# applied to the imaginary track 0
firstentry = title.split('//')[0]
if firstentry.startswith('0;'):
                title = firstentry[2:]
            else:
                title = ''
return title
def getTrackGroupList(self):
"""
Return a list representing track groups.
This list is composed of 2-tuples:
group title
track number list
"""
raw_title = self._getDiscTitle()
group_list = raw_title.split('//')
track_dict = {}
track_count = self.getTrackCount()
result = []
append = result.append
for group_index, group in enumerate(group_list):
if group == '': # (only ?) last group might be delimited but empty.
continue
if group[0] == '0' or ';' not in group: # Disk title
continue
track_range, group_name = group.split(';', 1)
if '-' in track_range:
track_min, track_max = track_range.split('-')
else:
track_min = track_max = track_range
track_min, track_max = int(track_min), int(track_max)
assert 0 <= track_min <= track_max <= track_count, (
track_min, track_max, track_count)
track_list = []
track_append = track_list.append
for track in xrange(track_min - 1, track_max):
if track in track_dict:
raise ValueError('Track %i is in 2 groups: %r[%i] & '
'%r[%i]' % (track, track_dict[track][0],
track_dict[track][1], group_name, group_index))
track_dict[track] = group_name, group_index
track_append(track)
append((group_name, track_list))
track_list = [x for x in xrange(track_count) if x not in track_dict]
if len(track_list):
append((None, track_list))
return result
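    # Illustration: the raw disc title is a '//'-separated list such as
    #   '0;My Disc//1-3;First group//5;Single track//'
    # where '0;...' holds the disc title and 'a-b;name' (or 'a;name') assigns
    # 1-based track numbers to a named group.  On a 6-track disc the example
    # above would make getTrackGroupList() return
    #   [('First group', [0, 1, 2]), ('Single track', [4]), (None, [3, 5])]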
def getTrackTitle(self, track, wchar=False):
"""
Return track title.
track (int)
Track number.
wchar (bool)
If True, return the content of wchar title.
If False, return the ASCII title.
"""
if wchar:
wchar_value = 3
else:
wchar_value = 2
query = self.formatQuery('1806 022018%b %w 3000 0a00 ff00 00000000',
wchar_value, track)
reply = self.send_query(query)
result = self.scanQuery(reply, '1806 022018%? %?%? %?%? %?%? 1000 ' \
'00%?0000 00%?000a %x')[0]
#if not wchar and len(result):
# assert result[-1] == '\x00'
# result = result[:-1]
return result
def setDiscTitle(self, title, wchar=False):
"""
Set disc title.
title (str)
The new title.
wchar (bool)
            If True, set the wchar title.
            If False, set the ASCII title.
"""
if wchar:
wchar = 1
else:
wchar = 0
old_len = len(self.getDiscTitle())
query = self.formatQuery('1807 02201801 00%b 3000 0a00 5000 %w 0000 ' \
'%w %s', wchar, len(title), old_len, title)
reply = self.send_query(query)
self.scanQuery(reply, '1807 02201801 00%? 3000 0a00 5000 %?%? 0000 ' \
'%?%?')
def setTrackTitle(self, track, title, wchar=False):
"""
Set track title.
track (int)
Track to retitle.
title (str)
The new title.
wchar (bool)
            If True, set the wchar title.
            If False, set the ASCII title.
"""
if wchar:
wchar = 3
else:
wchar = 2
try:
old_len = len(self.getTrackTitle(track))
except NetMDRejected:
old_len = 0
query = self.formatQuery('1807 022018%b %w 3000 0a00 5000 %w 0000 ' \
'%w %*', wchar, track, len(title), old_len,
title)
reply = self.send_query(query)
self.scanQuery(reply, '1807 022018%? %?%? 3000 0a00 5000 %?%? 0000 ' \
'%?%?')
def eraseTrack(self, track):
"""
Remove a track.
track (int)
Track to remove.
"""
query = self.formatQuery('1840 ff01 00 201001 %w', track)
reply = self.send_query(query)
self.scanQuery(reply, '1840 1001 00 201001 %?%?')
def moveTrack(self, source, dest):
"""
Move a track.
source (int)
Track position before moving.
dest (int)
Track position after moving.
"""
query = self.formatQuery('1843 ff00 00 201001 00 %w 201001 %w', source,
dest)
reply = self.send_query(query)
self.scanQuery(reply, '1843 0000 00 201001 00 %?%? 201001 %?%?')
def _getTrackInfo(self, track, p1, p2):
query = self.formatQuery('1806 02201001 %w %w %w ff00 00000000', track,
p1, p2)
reply = self.send_query(query)
return self.scanQuery(reply, '1806 02201001 %?%? %?%? %?%? 1000 ' \
'00%?0000 %x')[0]
def getTrackLength(self, track):
"""
Get track duration.
track (int)
Track to fetch information from.
Returns a list of 4 elements:
- hours
- minutes
- seconds
- samples (512 per second)
"""
raw_value = self._getTrackInfo(track, 0x3000, 0x0100)
result = self.scanQuery(raw_value, '0001 0006 0000 %b %b %b %b')
result[0] = BCD2int(result[0])
result[1] = BCD2int(result[1])
result[2] = BCD2int(result[2])
result[3] = BCD2int(result[3])
return result
def getTrackEncoding(self, track):
"""
Get track encoding parameters.
track (int)
Track to fetch information from.
Returns a list of 2 elements:
- codec (see ENCODING_* constants)
- channel number (see CHANNELS_* constants)
"""
return self.scanQuery(self._getTrackInfo(track, 0x3080, 0x0700),
'8007 0004 0110 %b %b')
def getTrackFlags(self, track):
"""
Get track flags.
track (int)
Track to fetch information from.
Returns a bitfield (See TRACK_FLAG_* constants).
"""
query = self.formatQuery('1806 01201001 %w ff00 00010008', track)
reply = self.send_query(query)
return self.scanQuery(reply, '1806 01201001 %?%? 10 00 00010008 %b') \
[0]
def getDiscCapacity(self):
"""
Get disc capacity.
Returns a list of 3 lists of 4 elements each (see getTrackLength).
The first list is the recorded duration.
The second list is the total disc duration (*).
The third list is the available disc duration (*).
(*): This result depends on current recording parameters.
"""
query = self.formatQuery('1806 02101000 3080 0300 ff00 00000000')
reply = self.send_query(query)
raw_result = self.scanQuery(reply, '1806 02101000 3080 0300 1000 ' \
'001d0000 001b 8003 0017 8000 0005 %w ' \
'%b %b %b 0005 %w %b %b %b 0005 %w %b ' \
'%b %b')
result = []
for offset in xrange(3):
offset *= 4
result.append([
BCD2int(raw_result[offset + 0]),
BCD2int(raw_result[offset + 1]),
BCD2int(raw_result[offset + 2]),
BCD2int(raw_result[offset + 3])])
return result
def getRecordingParameters(self):
"""
Get the current recording parameters.
See getTrackEncoding.
"""
query = self.formatQuery('1809 8001 0330 8801 0030 8805 0030 8807 ' \
'00 ff00 00000000')
reply = self.send_query(query)
return self.scanQuery(reply, '1809 8001 0330 8801 0030 8805 0030 ' \
'8807 00 1000 000e0000 000c 8805 0008 80e0 ' \
'0110 %b %b 4000')
def saveTrackToStream(self, track, outstream, events=defaultUploadEvents()):
"""
        Digitally dump a track to a stream.
This is only available on MZ-RH1.
track (int)
Track to extract.
        outstream (file-like object)
            Stream to which the extracted data is written.
"""
track += 1
query = self.formatQuery('1800 080046 f003010330 ff00 1001 %w', track)
reply = self.send_query(query)
(frames,codec,length) = self.scanQuery(reply, '1800 080046 f003010330 0000 1001 ' \
'%w %b %d')
        events.trackinfo(frames, length, codec)
self.net_md.readBulkToFile(length, outstream, callback=events.progress)
reply = self.readReply()
self.scanQuery(reply, '1800 080046 f003010330 0000 1001 %?%? 0000')
# Prevent firmware lockups on successive saveTrackToStream calls
sleep(0.01)
def disableNewTrackProtection(self, val):
"""
NetMD downloaded tracks are usually protected from modification
        at the MD device to prevent losing the check-out license. This
setting can be changed on some later models to have them record
unprotected tracks, like Simple Burner does.
The setting stays in effect until endSecureSession, where it
is reset to 0.
val (int)
zero enables protection of future downloaded tracks, one
disables protection for these tracks.
"""
query = self.formatQuery('1800 080046 f0030103 2b ff %w', val)
reply = self.send_query(query)
return self.scanQuery(reply, '1800 080046 f0030103 2b 00 %?%?')
def enterSecureSession(self):
"""
Enter a session secured by a root key found in an EKB. The
        EKB for this session has to be downloaded after entering the
session.
"""
query = self.formatQuery('1800 080046 f0030103 80 ff')
reply = self.send_query(query)
return self.scanQuery(reply, '1800 080046 f0030103 80 00')
def leaveSecureSession(self):
"""
Forget the key material from the EKB used in the secure
session.
"""
query = self.formatQuery('1800 080046 f0030103 81 ff')
reply = self.send_query(query)
return self.scanQuery(reply, '1800 080046 f0030103 81 00')
def getLeafID(self):
"""
Read the leaf ID of the present NetMD device. The leaf ID tells
        which keys the device possesses, which is needed to find out which
        parts of the EKB need to be sent to the device for it to decrypt
        the root key.
        The leaf ID is an 8-byte constant.
"""
query = self.formatQuery('1800 080046 f0030103 11 ff')
reply = self.send_query(query)
return self.scanQuery(reply, '1800 080046 f0030103 11 00 %*')[0]
def sendKeyData(self, ekbid, keychain, depth, ekbsignature):
"""
        Send key data to the device. The device uses its built-in key
to decrypt the root key from an EKB.
ekbid (int)
The ID of the EKB.
keychain (list of 16-byte str)
            A chain of encrypted keys. One end of the chain is the
            encrypted root key, the other end is a key encrypted by a key
            the device has in its key set. The direction of the chain is
not yet known.
depth (str)
            Selects which key from the device's key set has to be used to
start decrypting the chain. Each key in the key set corresponds
to a specific depth in the tree of device IDs.
ekbsignature
            A 24-byte signature of the root key. Used to verify integrity
of the decrypted root key by the device.
"""
chainlen = len(keychain)
# 16 bytes header, 16 bytes per key, 24 bytes for the signature
databytes = 16 + 16*chainlen + 24
for key in keychain:
if len(key) != 16:
raise ValueError("Each key in the chain needs to have 16 bytes, this one has %d" % len(key))
if depth < 1 or depth > 63:
raise ValueError('Supplied depth is invalid')
if len(ekbsignature) != 24:
raise ValueError('Supplied EKB signature length wrong')
query = self.formatQuery('1800 080046 f0030103 12 ff %w %d' \
'%d %d %d 00000000 %* %*', databytes, databytes,
chainlen, depth, ekbid, "".join(keychain), ekbsignature)
reply = self.send_query(query)
return self.scanQuery(reply, '1800 080046 f0030103 12 01 %?%? %?%?%?%?')
def sessionKeyExchange(self, hostnonce):
"""
Exchange a session key with the device. Needs to have a root
key sent to the device using sendKeyData before.
hostnonce (str)
8 bytes random binary data
Returns
device nonce (str), another 8 bytes random data
"""
if len(hostnonce) != 8:
raise ValueError('Supplied host nonce length wrong')
query = self.formatQuery('1800 080046 f0030103 20 ff 000000 %*', hostnonce)
reply = self.send_query(query)
return self.scanQuery(reply, '1800 080046 f0030103 20 00 000000 %*')[0]
def sessionKeyForget(self):
"""
Invalidate the session key established by nonce exchange.
Does not invalidate the root key set up by sendKeyData.
"""
query = self.formatQuery('1800 080046 f0030103 21 ff 000000')
reply = self.send_query(query)
return self.scanQuery(reply, '1800 080046 f0030103 21 00 000000')
def setupDownload(self, contentid, keyenckey, sessionkey):
"""
Prepare the download of a music track to the device.
contentid (str)
20 bytes Unique Identifier for the DRM system.
keyenckey (str)
8 bytes DES key used to encrypt the block data keys
sessionkey (str)
8 bytes DES key used for securing the current session, the key
has to be calculated by the caller from the data exchanged in
sessionKeyExchange and the root key selected by sendKeyData
"""
if DES is None:
            raise ImportError('Crypto.Cipher.DES not found, you cannot '
'download tracks')
if len(contentid) != 20:
raise ValueError('Supplied Content ID length wrong')
if len(keyenckey) != 8:
raise ValueError('Supplied Key Encryption Key length wrong')
if len(sessionkey) != 8:
raise ValueError('Supplied Session Key length wrong')
encrypter = DES.new(sessionkey, DES.MODE_CBC, '\0\0\0\0\0\0\0\0')
        encryptedarg = encrypter.encrypt('\1\1\1\1' + contentid + keyenckey)
query = self.formatQuery('1800 080046 f0030103 22 ff 0000 %*', encryptedarg)
reply = self.send_query(query)
return self.scanQuery(reply, '1800 080046 f0030103 22 00 0000')
def commitTrack(self, tracknum, sessionkey):
"""
Commit a track. The idea is that this command tells the device
that the license for the track has been checked out from the
computer.
        tracknum (int)
Track number returned from downloading command
sessionkey (str)
8-byte DES key used for securing the download session
"""
if DES is None:
            raise ImportError('Crypto.Cipher.DES not found, you cannot '
'download tracks')
if len(sessionkey) != 8:
raise ValueError('Supplied Session Key length wrong')
encrypter = DES.new(sessionkey, DES.MODE_ECB)
authentication = encrypter.encrypt('\0\0\0\0\0\0\0\0')
query = self.formatQuery('1800 080046 f0030103 48 ff 00 1001 %w %*',
tracknum, authentication)
reply = self.send_query(query)
return self.scanQuery(reply, '1800 080046 f0030103 48 00 00 1001 %?%?')
def sendTrack(self, wireformat, diskformat, frames, pktcount, packets, sessionkey):
"""
Send a track to the NetMD unit.
wireformat (int)
The format of the data sent over the USB link.
one of WIREFORMAT_PCM, WIREFORMAT_LP2, WIREFORMAT_105KBPS or
WIREFORMAT_LP4
diskformat (int)
The format of the data on the MD medium.
one of DISKFORMAT_SP_STEREO, DISKFORMAT_LP2 or DISKFORMAT_LP4.
frames (int)
The number of frames to transfer. The frame size depends on
the wire format. It's 2048 bytes for WIREFORMAT_PCM, 192 bytes
            for WIREFORMAT_LP2, 152 bytes for WIREFORMAT_105KBPS and 96 bytes
for WIREFORMAT_LP4.
pktcount (int)
Number of data packets to send (needed to calculate the raw
            packetized stream size).
packets (iterator)
iterator over (str, str, str), with the first string being the
encrypted DES encryption key for this packet (8 bytes), the second
the IV (8 bytes, too) and the third string the encrypted data.
sessionkey (str)
8-byte DES key used for securing the download session
Returns
A tuple (tracknum, UUID, content ID).
tracknum (int)
the number the new track got.
UUID (str)
            an 8-byte value to recognize this track for check-in purposes
content ID
the content ID. Should always be the same as passed to
setupDownload, probably present to prevent some attack vectors
to the DRM system.
"""
if DES is None:
            raise ImportError('Crypto.Cipher.DES not found, you cannot '
'download tracks')
if len(sessionkey) != 8:
raise ValueError('Supplied Session Key length wrong')
framesizedict = {
WIREFORMAT_PCM: 2048,
WIREFORMAT_LP2: 192,
WIREFORMAT_105KBPS: 152,
WIREFORMAT_LP4: 96,
}
        totalbytes = framesizedict[wireformat] * frames + pktcount * 24
query = self.formatQuery('1800 080046 f0030103 28 ff 000100 1001' \
'ffff 00 %b %b %d %d',
wireformat, diskformat, frames, totalbytes)
reply = self.send_query(query)
self.scanQuery(reply, '1800 080046 f0030103 28 00 000100 1001 %?%? 00'\
'%*')
for (key,iv,data) in packets:
binpkt = pack('>Q',len(data)) + key + iv + data
self.net_md.writeBulk(binpkt)
reply = self.readReply()
self.net_md._getReplyLength()
(track, encryptedreply) = \
self.scanQuery(reply, '1800 080046 f0030103 28 00 000100 1001 %w 00' \
'%?%? %?%?%?%? %?%?%?%? %*')
encrypter = DES.new(sessionkey, DES.MODE_CBC, '\0\0\0\0\0\0\0\0')
replydata = encrypter.decrypt(encryptedreply)
return (track, replydata[0:8], replydata[12:32])
def getTrackUUID(self, track):
"""
Gets the DRM tracking ID for a track.
NetMD downloaded tracks have an 8-byte identifier (instead of their
content ID) stored on the MD medium. This is used to verify the
identity of a track when checking in.
track (int)
The track number
Returns
An 8-byte binary string containing the track UUID.
"""
query = self.formatQuery('1800 080046 f0030103 23 ff 1001 %w', track)
reply = self.send_query(query)
return self.scanQuery(reply,'1800 080046 f0030103 23 00 1001 %?%? %*')[0]
def retailmac(key, value, iv = 8*"\0"):
if DES is None or DES3 is None:
        raise ImportError('Crypto.Cipher.DES or DES3 not found, you cannot '
'download tracks')
subkeyA = key[0:8]
beginning = value[0:-8]
end = value[-8:]
step1crypt = DES.new(subkeyA, DES.MODE_CBC, iv)
iv2 = step1crypt.encrypt(beginning)[-8:]
step2crypt = DES3.new(key, DES3.MODE_CBC, iv2)
return step2crypt.encrypt(end)
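# Note: retailmac computes a DES "retail MAC": single-DES CBC over all but the
# last 8 bytes of `value` (keyed with the first 8 key bytes), then one
# triple-DES CBC block over the final 8 bytes, chained via the last CBC output.
# MDSession below uses it to derive the session key from the EKB root key and
# the concatenated host/device nonces.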
diskforwire = {
WIREFORMAT_PCM: DISKFORMAT_SP_STEREO,
WIREFORMAT_LP2: DISKFORMAT_LP2,
WIREFORMAT_105KBPS: DISKFORMAT_LP2,
WIREFORMAT_LP4: DISKFORMAT_LP4,
}
class MDSession:
def __init__(self, md_iface, ekbobject):
self.md = md_iface
self.sessionkey = None
self.md.enterSecureSession()
(chain, depth, sig) = ekbobject.getEKBDataForLeafId(self.md.getLeafID())
self.md.sendKeyData(ekbobject.getEKBID(), chain, depth, sig)
        hostnonce = array.array('B', [random.randrange(256) for x in range(8)]).tostring()
devnonce = self.md.sessionKeyExchange(hostnonce)
nonce = hostnonce + devnonce
self.sessionkey = retailmac(ekbobject.getRootKey(), nonce)
def downloadtrack(self, trk):
self.md.setupDownload(trk.getContentID(), trk.getKEK(), self.sessionkey)
dataformat = trk.getDataFormat()
(track,uuid,ccid) = self.md.sendTrack(dataformat, diskforwire[dataformat], \
trk.getFramecount(), trk.getPacketcount(),
trk.getPackets(), self.sessionkey)
self.md.cacheTOC()
self.md.setTrackTitle(track,trk.getTitle())
self.md.syncTOC()
self.md.commitTrack(track, self.sessionkey)
return (track, uuid, ccid)
def __del__(self):
self.close()
def close(self):
        if self.sessionkey is not None:
            self.md.sessionKeyForget()
self.sessionkey = None
self.md.leaveSecureSession()
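# Illustrative sketch of how the pieces above fit together for one download.
# `ekb` and `trk` are hypothetical objects supplying the EKB data
# (getEKBID / getEKBDataForLeafId / getRootKey) and the track payload
# (getContentID / getKEK / getDataFormat / getFramecount / getPacketcount /
# getPackets / getTitle) that MDSession expects:
#
#   md = NetMDInterface(net_md_device)
#   md.acquire()
#   session = MDSession(md, ekb)
#   track, uuid, ccid = session.downloadtrack(trk)
#   session.close()
#   md.release()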
| gpl-2.0 | 6,562,219,316,995,415,000 | 35.322159 | 108 | 0.547116 | false |
html5lib/html5lib-python | html5lib/tests/test_encoding.py | 18 | 4820 | from __future__ import absolute_import, division, unicode_literals
import os
import pytest
from .support import get_data_files, test_dir, errorMessage, TestData as _TestData
from html5lib import HTMLParser, _inputstream
def test_basic_prescan_length():
data = "<title>Caf\u00E9</title><!--a--><meta charset='utf-8'>".encode('utf-8')
pad = 1024 - len(data) + 1
data = data.replace(b"-a-", b"-" + (b"a" * pad) + b"-")
assert len(data) == 1024 # Sanity
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False)
assert 'utf-8' == stream.charEncoding[0].name
def test_parser_reparse():
data = "<title>Caf\u00E9</title><!--a--><meta charset='utf-8'>".encode('utf-8')
pad = 10240 - len(data) + 1
data = data.replace(b"-a-", b"-" + (b"a" * pad) + b"-")
assert len(data) == 10240 # Sanity
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False)
assert 'windows-1252' == stream.charEncoding[0].name
p = HTMLParser(namespaceHTMLElements=False)
doc = p.parse(data, useChardet=False)
assert 'utf-8' == p.documentEncoding
assert doc.find(".//title").text == "Caf\u00E9"
@pytest.mark.parametrize("expected,data,kwargs", [
("utf-16le", b"\xFF\xFE", {"override_encoding": "iso-8859-2"}),
("utf-16be", b"\xFE\xFF", {"override_encoding": "iso-8859-2"}),
("utf-8", b"\xEF\xBB\xBF", {"override_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"override_encoding": "iso-8859-2", "transport_encoding": "iso-8859-3"}),
("iso-8859-2", b"<meta charset=iso-8859-3>", {"transport_encoding": "iso-8859-2"}),
("iso-8859-2", b"<meta charset=iso-8859-2>", {"same_origin_parent_encoding": "iso-8859-3"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "iso-8859-2", "likely_encoding": "iso-8859-3"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "utf-16", "likely_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "utf-16be", "likely_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "utf-16le", "likely_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"likely_encoding": "iso-8859-2", "default_encoding": "iso-8859-3"}),
("iso-8859-2", b"", {"default_encoding": "iso-8859-2"}),
("windows-1252", b"", {"default_encoding": "totally-bogus-string"}),
("windows-1252", b"", {}),
])
def test_parser_args(expected, data, kwargs):
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False, **kwargs)
assert expected == stream.charEncoding[0].name
p = HTMLParser()
p.parse(data, useChardet=False, **kwargs)
assert expected == p.documentEncoding
@pytest.mark.parametrize("kwargs", [
{"override_encoding": "iso-8859-2"},
{"override_encoding": None},
{"transport_encoding": "iso-8859-2"},
{"transport_encoding": None},
{"same_origin_parent_encoding": "iso-8859-2"},
{"same_origin_parent_encoding": None},
{"likely_encoding": "iso-8859-2"},
{"likely_encoding": None},
{"default_encoding": "iso-8859-2"},
{"default_encoding": None},
{"foo_encoding": "iso-8859-2"},
{"foo_encoding": None},
])
def test_parser_args_raises(kwargs):
with pytest.raises(TypeError) as exc_info:
p = HTMLParser()
p.parse("", useChardet=False, **kwargs)
assert exc_info.value.args[0].startswith("Cannot set an encoding with a unicode input")
def param_encoding():
for filename in get_data_files("encoding"):
tests = _TestData(filename, b"data", encoding=None)
for test in tests:
yield test[b'data'], test[b'encoding']
@pytest.mark.parametrize("data, encoding", param_encoding())
def test_parser_encoding(data, encoding):
p = HTMLParser()
assert p.documentEncoding is None
p.parse(data, useChardet=False)
encoding = encoding.lower().decode("ascii")
assert encoding == p.documentEncoding, errorMessage(data, encoding, p.documentEncoding)
@pytest.mark.parametrize("data, encoding", param_encoding())
def test_prescan_encoding(data, encoding):
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False)
encoding = encoding.lower().decode("ascii")
# Very crude way to ignore irrelevant tests
if len(data) > stream.numBytesMeta:
return
assert encoding == stream.charEncoding[0].name, errorMessage(data, encoding, stream.charEncoding[0].name)
# pylint:disable=wrong-import-position
try:
import chardet # noqa
except ImportError:
print("chardet not found, skipping chardet tests")
else:
def test_chardet():
with open(os.path.join(test_dir, "encoding", "chardet", "test_big5.txt"), "rb") as fp:
encoding = _inputstream.HTMLInputStream(fp.read()).charEncoding
assert encoding[0].name == "big5"
# pylint:enable=wrong-import-position
| mit | 2,522,060,990,303,054,000 | 40.196581 | 109 | 0.648548 | false |
ichuang/sympy | sympy/plotting/plot_modes.py | 3 | 5242 | from plot_curve import PlotCurve
from plot_surface import PlotSurface
from sympy import pi, lambdify
from sympy.functions import sin, cos
from math import sin as p_sin
from math import cos as p_cos
def float_vec3(f):
def inner(*args):
v = f(*args)
return float(v[0]), float(v[1]), float(v[2])
return inner
class Cartesian2D(PlotCurve):
i_vars, d_vars = 'x', 'y'
intervals = [[-5, 5, 100]]
aliases = ['cartesian']
is_default = True
def _get_sympy_evaluator(self):
fy = self.d_vars[0]
x = self.t_interval.v
@float_vec3
def e(_x):
return (_x, fy.subs(x, _x), 0.0)
return e
def _get_lambda_evaluator(self):
fy = self.d_vars[0]
x = self.t_interval.v
return lambdify([x], [x, fy, 0.0])
class Cartesian3D(PlotSurface):
i_vars, d_vars = 'xy', 'z'
intervals = [[-1, 1, 40], [-1, 1, 40]]
aliases = ['cartesian', 'monge']
is_default = True
def _get_sympy_evaluator(self):
fz = self.d_vars[0]
x = self.u_interval.v
y = self.v_interval.v
@float_vec3
def e(_x, _y):
return (_x, _y, fz.subs(x, _x).subs(y, _y))
return e
def _get_lambda_evaluator(self):
fz = self.d_vars[0]
x = self.u_interval.v
y = self.v_interval.v
return lambdify([x, y], [x, y, fz])
class ParametricCurve2D(PlotCurve):
i_vars, d_vars = 't', 'xy'
intervals = [[0, 2*pi, 100]]
aliases = ['parametric']
is_default = True
def _get_sympy_evaluator(self):
fx, fy = self.d_vars
t = self.t_interval.v
@float_vec3
def e(_t):
return (fx.subs(t, _t), fy.subs(t, _t), 0.0)
return e
def _get_lambda_evaluator(self):
fx, fy = self.d_vars
t = self.t_interval.v
return lambdify([t], [fx, fy, 0.0])
class ParametricCurve3D(PlotCurve):
i_vars, d_vars = 't', 'xyz'
intervals = [[0, 2*pi, 100]]
aliases = ['parametric']
is_default = True
def _get_sympy_evaluator(self):
fx, fy, fz = self.d_vars
t = self.t_interval.v
@float_vec3
def e(_t):
return (fx.subs(t, _t), fy.subs(t, _t), fz.subs(t, _t))
return e
def _get_lambda_evaluator(self):
fx, fy, fz = self.d_vars
t = self.t_interval.v
return lambdify([t], [fx, fy, fz])
class ParametricSurface(PlotSurface):
i_vars, d_vars = 'uv', 'xyz'
intervals = [[-1, 1, 40], [-1, 1, 40]]
aliases = ['parametric']
is_default = True
def _get_sympy_evaluator(self):
fx, fy, fz = self.d_vars
u = self.u_interval.v
v = self.v_interval.v
@float_vec3
def e(_u, _v):
return (fx.subs(u, _u).subs(v, _v),
fy.subs(u, _u).subs(v, _v),
fz.subs(u, _u).subs(v, _v))
return e
def _get_lambda_evaluator(self):
fx, fy, fz = self.d_vars
u = self.u_interval.v
v = self.v_interval.v
return lambdify([u, v], [fx, fy, fz])
class Polar(PlotCurve):
i_vars, d_vars = 't', 'r'
intervals = [[0, 2*pi, 100]]
aliases = ['polar']
is_default = False
def _get_sympy_evaluator(self):
fr = self.d_vars[0]
t = self.t_interval.v
def e(_t):
_r = float(fr.subs(t, _t))
return (_r*p_cos(_t), _r*p_sin(_t), 0.0)
return e
def _get_lambda_evaluator(self):
fr = self.d_vars[0]
t = self.t_interval.v
fx, fy = fr*cos(t), fr*sin(t)
return lambdify([t], [fx, fy, 0.0])
class Cylindrical(PlotSurface):
i_vars, d_vars = 'th', 'r'
intervals = [[0, 2*pi, 40], [-1, 1, 20]]
aliases = ['cylindrical', 'polar']
is_default = False
def _get_sympy_evaluator(self):
fr = self.d_vars[0]
t = self.u_interval.v
h = self.v_interval.v
def e(_t, _h):
_r = float(fr.subs(t, _t).subs(h, _h))
return (_r*p_cos(_t), _r*p_sin(_t), _h)
return e
def _get_lambda_evaluator(self):
fr = self.d_vars[0]
t = self.u_interval.v
h = self.v_interval.v
fx, fy = fr*cos(t), fr*sin(t)
return lambdify([t, h], [fx, fy, h])
class Spherical(PlotSurface):
i_vars, d_vars = 'tp', 'r'
intervals = [[0, 2*pi, 40], [0, pi, 20]]
aliases = ['spherical']
is_default = False
def _get_sympy_evaluator(self):
fr = self.d_vars[0]
t = self.u_interval.v
p = self.v_interval.v
def e(_t, _p):
_r = float(fr.subs(t, _t).subs(p, _p))
return (_r*p_cos(_t)*p_sin(_p),
_r*p_sin(_t)*p_sin(_p),
_r*p_cos(_p))
return e
def _get_lambda_evaluator(self):
fr = self.d_vars[0]
t = self.u_interval.v
p = self.v_interval.v
fx = fr * cos(t) * sin(p)
fy = fr * sin(t) * sin(p)
fz = fr * cos(p)
return lambdify([t, p], [fx, fy, fz])
Cartesian2D._register()
Cartesian3D._register()
ParametricCurve2D._register()
ParametricCurve3D._register()
ParametricSurface._register()
Polar._register()
Cylindrical._register()
Spherical._register()
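# Illustrative sketch: a new plotting mode would follow the same pattern as the
# classes above -- subclass PlotCurve or PlotSurface, declare i_vars/d_vars,
# intervals, aliases and is_default, implement the two evaluator methods, and
# call _register(), e.g.
#
#   class MyMode(PlotCurve):
#       i_vars, d_vars = 't', 'xy'
#       intervals = [[0, 1, 100]]
#       aliases = ['mymode']
#       is_default = False
#       def _get_sympy_evaluator(self): ...
#       def _get_lambda_evaluator(self): ...
#   MyMode._register()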
| bsd-3-clause | -1,641,263,633,310,484,200 | 26.445026 | 67 | 0.51488 | false |
AleCandido/Lab3.2 | Esercitazione15/passabanda.py | 1 | 4213 | import sys
import pylab
from scipy.optimize import curve_fit
from scipy.stats import chisqprob
import numpy as np
import getpass
users={"candi": "C:\\Users\\candi\\Documents\\GitHub\\Lab3.2\\",
"silvanamorreale":"C:\\Users\\silvanamorreale\\Documents\\GitHub\\Lab3.2\\" ,
"Studenti": "C:\\Users\\Studenti\\Desktop\\Lab3\\",
"User":"C:\\Users\\User\\Documents\\GitHub\\Lab3.2\\",
"andrea": "/home/andrea/Documenti/Da salvare 12-5-2017/Documenti/GitHub/Lab3.2/",
"viviana": "C:\\Users\\viviana\\Documents\\GitHub\\Lab3.2\\"
}
try:
user=getpass.getuser()
path=users[user]
print("buongiorno ", user, "!!!")
except:
raise Error("unknown user, please specify it and the path in the file Esercitazione*.py")
sys.path = sys.path + [path]
dir= path + "Esercitazione15/"
from BuzzLightyear import *
from lab import *
import uncertainties
import uncertainties.unumpy
###########################################################################
print("===========PASSABANDA==============")
print("valori attesi...")
def myres(r):
return uncertainties.ufloat(r, mme(r, "ohm"))
R2=myres(119.1)
C1=10.78e-9
C2=10.67e-9
R1=myres(2.68e3)
R3=myres(46.6e3)
Rp=R1*R2/(R1+R2)
C=C1
w0_exp=(1/(C*(Rp*R3)**0.5))#/(2*np.pi)
f0_exp=(1/(C*(Rp*R3)**0.5))/(2*np.pi)
Q_exp=0.5*(R3/Rp)**0.5
Dw_exp=w0_exp/Q_exp
Df_exp=f0_exp/Q_exp
R1p=myres(9.92e2) # this resistor value is wrong... not sure how serious it is, but it explains why our amplification is lower... it is consistent with what the fit gives
R2p=myres(33.2e3)
R3p=myres(3.91e3)
g00=1/(R1*C*Dw_exp)*(R2p/R1p+1)
print("f0_exp={} Q_exp={} g00={}".format(f0_exp, Q_exp, g00))
pylab.figure(figsnum)
figsnum+=1
pylab.title("amplificazione passa-banda")
pylab.xlabel("frequenza [Hz]")
pylab.ylabel("A($f$)")
############### Data acquisition
dir_grph=dir+"grafici/"
dir = dir + "data/"
file="passabanda.txt"
f, vin, vout = loadtxt(dir+file,unpack=True)
f=f*1e3
Df=51/1e6*f
amp=vout/vin
dvout=mme(vout, "volt", "oscil")
dvin=mme(vin, "volt", "oscil")
damp=amp*((dvout/vout)**2+(dvin/vin)**2)**0.5
##############Fit...
# note: A is not dimensionless... going from frequencies to angular frequencies, A should scale by 2*pi
g=lambda w, A, Q, w0: A*w/((w**2-w0**2)**2+w**2*w0**2/Q)**0.5
p0=(185, 10, 6.1e3)
dof=len(f)-3
pars, covs = curve_fit(g, f, amp,p0, damp, maxfev=10000)
A, Q, w0=uncertainties.correlated_values(pars, covs)
#############plot...
pylab.loglog()
pylab.errorbar(f, amp, damp,Df,fmt=".")
domain = pylab.logspace(math.log10(min(f)),math.log10(max(f)), 1000)
gdomain = g(domain, *pars)
pylab.plot(domain, gdomain)
pylab.xlim(min(domain)*0.9,max(domain)*1.1)
vint = pylab.vectorize(int)
pylab.xticks(vint((pylab.logspace(log10(min(domain)*0.9),log10(max(domain)*1.1), 5)//100)*100),vint((pylab.logspace(log10(min(domain)*0.9),log10(max(domain)*1.1), 5)//100)*100))
pylab.ylim(min(gdomain)*0.9, max(gdomain)*1.1)
pylab.yticks(vint((pylab.logspace(log10(min(gdomain)*0.9),log10(max(gdomain)*1.1), 5)//10)*10),vint((pylab.logspace(log10(min(gdomain)*0.9),log10(max(gdomain)*1.1), 5)//10)*10))
pylab.savefig(dir_grph+"passabanda.pdf")
############output parametri...
print("A={} w0={} Q_true={}".format(A, w0, Q**0.5))
for i, j in enumerate(pars):
print(i, pars[i], covs[i, i]**0.5)
chisq=np.sum((amp-g(f, *pars))**2/damp**2)
print("chisq=", chisq, dof, chisqprob(chisq,dof))
Dw=w0/Q**0.5 # bandwidth (it is named w, but it is actually a frequency...)
# check on the parameters
print("controllo di essere davvero a -3dB")
print("2**0.5 - g(w0, A, Q, w0)/g(w0+Dw/2, A, Q, w0) = ",sqrt(2)-g(w0, A, Q, w0)/g(w0+Dw/2, A, Q, w0))
print("2**0.5 - g(w0, A, Q, w0)/g(w0-Dw/2, A, Q, w0) = ",sqrt(2)-g(w0, A, Q, w0)/g(w0-Dw/2, A, Q, w0))
A3=g(w0, A, Q, w0) # centre-band amplification (with uncertainties...)
#A, Q, w0=uncertainties.correlated_values(pars, covs)
print("guadagno centro banda=", g(w0, A, Q, w0))
print("Risultati A={} Q={} w0={} Dw={}".format(A, Q**0.5, w0, Dw))
print("Il Q value non è compatibile con quanto atteso...")
dom=np.linspace(min(f), max(f), 1000)
integrale=np.sum(g(dom, A, Q, w0)**2)*(max(f)-min(f))/1000/A3**2
print("-----------", integrale, np.pi/2*Dw)
Df_true=integrale
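# Note: for a second-order band-pass the equivalent noise bandwidth is pi/2
# times the -3 dB bandwidth, i.e. integral(|A(f)|^2 df) / |A(f0)|^2 ~ (pi/2)*Df,
# which is what the numerical integral above is compared against.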
| gpl-2.0 | -3,721,280,279,171,289,000 | 26.48366 | 177 | 0.633294 | false |
kmacinnis/sympy | sympy/solvers/tests/test_inequalities.py | 3 | 11948 | """Tests for tools for solving inequalities and systems of inequalities. """
from sympy import (And, Eq, FiniteSet, Ge, Gt, im, Interval, Le, Lt, Ne, oo,
Or, Q, re, S, sin, sqrt, Union)
from sympy.assumptions import assuming
from sympy.abc import x, y
from sympy.solvers.inequalities import (reduce_inequalities,
reduce_rational_inequalities)
from sympy.utilities.pytest import raises
inf = oo.evalf()
def test_reduce_poly_inequalities_real_interval():
with assuming(Q.real(x), Q.real(y)):
assert reduce_rational_inequalities(
[[Eq(x**2, 0)]], x, relational=False) == FiniteSet(0)
assert reduce_rational_inequalities(
[[Le(x**2, 0)]], x, relational=False) == FiniteSet(0)
assert reduce_rational_inequalities(
[[Lt(x**2, 0)]], x, relational=False) == S.EmptySet
assert reduce_rational_inequalities(
[[Ge(x**2, 0)]], x, relational=False) == Interval(-oo, oo)
assert reduce_rational_inequalities(
[[Gt(x**2, 0)]], x, relational=False) == FiniteSet(0).complement
assert reduce_rational_inequalities(
[[Ne(x**2, 0)]], x, relational=False) == FiniteSet(0).complement
assert reduce_rational_inequalities(
[[Eq(x**2, 1)]], x, relational=False) == FiniteSet(-1, 1)
assert reduce_rational_inequalities(
[[Le(x**2, 1)]], x, relational=False) == Interval(-1, 1)
assert reduce_rational_inequalities(
[[Lt(x**2, 1)]], x, relational=False) == Interval(-1, 1, True, True)
assert reduce_rational_inequalities([[Ge(x**2, 1)]], x, relational=False) == Union(Interval(-oo, -1), Interval(1, oo))
assert reduce_rational_inequalities(
[[Gt(x**2, 1)]], x, relational=False) == Interval(-1, 1).complement
assert reduce_rational_inequalities(
[[Ne(x**2, 1)]], x, relational=False) == FiniteSet(-1, 1).complement
assert reduce_rational_inequalities([[Eq(
x**2, 1.0)]], x, relational=False) == FiniteSet(-1.0, 1.0).evalf()
assert reduce_rational_inequalities(
[[Le(x**2, 1.0)]], x, relational=False) == Interval(-1.0, 1.0)
assert reduce_rational_inequalities([[Lt(
x**2, 1.0)]], x, relational=False) == Interval(-1.0, 1.0, True, True)
assert reduce_rational_inequalities([[Ge(x**2, 1.0)]], x, relational=False) == Union(Interval(-inf, -1.0), Interval(1.0, inf))
assert reduce_rational_inequalities([[Gt(x**2, 1.0)]], x, relational=False) == Union(Interval(-inf, -1.0, right_open=True), Interval(1.0, inf, left_open=True))
assert reduce_rational_inequalities([[Ne(
x**2, 1.0)]], x, relational=False) == FiniteSet(-1.0, 1.0).complement
s = sqrt(2)
assert reduce_rational_inequalities([[Lt(
x**2 - 1, 0), Gt(x**2 - 1, 0)]], x, relational=False) == S.EmptySet
assert reduce_rational_inequalities([[Le(x**2 - 1, 0), Ge(
x**2 - 1, 0)]], x, relational=False) == FiniteSet(-1, 1)
assert reduce_rational_inequalities([[Le(x**2 - 2, 0), Ge(x**2 - 1, 0)]], x, relational=False) == Union(Interval(-s, -1, False, False), Interval(1, s, False, False))
assert reduce_rational_inequalities([[Le(x**2 - 2, 0), Gt(x**2 - 1, 0)]], x, relational=False) == Union(Interval(-s, -1, False, True), Interval(1, s, True, False))
assert reduce_rational_inequalities([[Lt(x**2 - 2, 0), Ge(x**2 - 1, 0)]], x, relational=False) == Union(Interval(-s, -1, True, False), Interval(1, s, False, True))
assert reduce_rational_inequalities([[Lt(x**2 - 2, 0), Gt(x**2 - 1, 0)]], x, relational=False) == Union(Interval(-s, -1, True, True), Interval(1, s, True, True))
assert reduce_rational_inequalities([[Lt(x**2 - 2, 0), Ne(x**2 - 1, 0)]], x, relational=False) == Union(Interval(-s, -1, True, True), Interval(-1, 1, True, True), Interval(1, s, True, True))
def test_reduce_poly_inequalities_real_relational():
with assuming(Q.real(x), Q.real(y)):
assert reduce_rational_inequalities(
[[Eq(x**2, 0)]], x, relational=True) == Eq(x, 0)
assert reduce_rational_inequalities(
[[Le(x**2, 0)]], x, relational=True) == Eq(x, 0)
assert reduce_rational_inequalities(
[[Lt(x**2, 0)]], x, relational=True) is False
assert reduce_rational_inequalities(
[[Ge(x**2, 0)]], x, relational=True) is True
assert reduce_rational_inequalities(
[[Gt(x**2, 0)]], x, relational=True) == Or(Lt(x, 0), Gt(x, 0))
assert reduce_rational_inequalities(
[[Ne(x**2, 0)]], x, relational=True) == Or(Lt(x, 0), Gt(x, 0))
assert reduce_rational_inequalities(
[[Eq(x**2, 1)]], x, relational=True) == Or(Eq(x, -1), Eq(x, 1))
assert reduce_rational_inequalities(
[[Le(x**2, 1)]], x, relational=True) == And(Le(-1, x), Le(x, 1))
assert reduce_rational_inequalities(
[[Lt(x**2, 1)]], x, relational=True) == And(Lt(-1, x), Lt(x, 1))
assert reduce_rational_inequalities(
[[Ge(x**2, 1)]], x, relational=True) == Or(Le(x, -1), Ge(x, 1))
assert reduce_rational_inequalities(
[[Gt(x**2, 1)]], x, relational=True) == Or(Lt(x, -1), Gt(x, 1))
assert reduce_rational_inequalities([[Ne(x**2, 1)]], x, relational=True) == Or(
Lt(x, -1), And(Lt(-1, x), Lt(x, 1)), Gt(x, 1))
assert reduce_rational_inequalities(
[[Le(x**2, 1.0)]], x, relational=True) == And(Le(-1.0, x), Le(x, 1.0))
assert reduce_rational_inequalities(
[[Lt(x**2, 1.0)]], x, relational=True) == And(Lt(-1.0, x), Lt(x, 1.0))
assert reduce_rational_inequalities(
[[Ge(x**2, 1.0)]], x, relational=True) == Or(Le(x, -1.0), Ge(x, 1.0))
assert reduce_rational_inequalities(
[[Gt(x**2, 1.0)]], x, relational=True) == Or(Lt(x, -1.0), Gt(x, 1.0))
assert reduce_rational_inequalities([[Ne(x**2, 1.0)]], x, relational=True) == \
Or(Lt(x, -1.0), And(Lt(-1.0, x), Lt(x, 1.0)), Gt(x, 1.0))
def test_reduce_poly_inequalities_complex_relational():
cond = Eq(im(x), 0)
assert reduce_rational_inequalities(
[[Eq(x**2, 0)]], x, relational=True) == And(Eq(re(x), 0), cond)
assert reduce_rational_inequalities(
[[Le(x**2, 0)]], x, relational=True) == And(Eq(re(x), 0), cond)
assert reduce_rational_inequalities(
[[Lt(x**2, 0)]], x, relational=True) is False
assert reduce_rational_inequalities(
[[Ge(x**2, 0)]], x, relational=True) == cond
assert reduce_rational_inequalities([[Gt(x**2, 0)]], x, relational=True) == \
And(Or(Lt(re(x), 0), Gt(re(x), 0)), cond)
assert reduce_rational_inequalities([[Ne(x**2, 0)]], x, relational=True) == \
And(Or(Lt(re(x), 0), Gt(re(x), 0)), cond)
assert reduce_rational_inequalities([[Eq(x**2, 1)]], x, relational=True) == \
And(Or(Eq(re(x), -1), Eq(re(x), 1)), cond)
assert reduce_rational_inequalities([[Le(x**2, 1)]], x, relational=True) == \
And(And(Le(-1, re(x)), Le(re(x), 1)), cond)
assert reduce_rational_inequalities([[Lt(x**2, 1)]], x, relational=True) == \
And(And(Lt(-1, re(x)), Lt(re(x), 1)), cond)
assert reduce_rational_inequalities([[Ge(x**2, 1)]], x, relational=True) == \
And(Or(Le(re(x), -1), Ge(re(x), 1)), cond)
assert reduce_rational_inequalities([[Gt(x**2, 1)]], x, relational=True) == \
And(Or(Lt(re(x), -1), Gt(re(x), 1)), cond)
assert reduce_rational_inequalities([[Ne(x**2, 1)]], x, relational=True) == \
And(Or(Lt(re(x), -1), And(Lt(-1, re(x)), Lt(re(x), 1)), Gt(re(x), 1)), cond)
assert reduce_rational_inequalities([[Le(x**2, 1.0)]], x, relational=True) == \
And(And(Le(-1.0, re(x)), Le(re(x), 1.0)), cond)
assert reduce_rational_inequalities([[Lt(x**2, 1.0)]], x, relational=True) == \
And(And(Lt(-1.0, re(x)), Lt(re(x), 1.0)), cond)
assert reduce_rational_inequalities([[Ge(x**2, 1.0)]], x, relational=True) == \
And(Or(Le(re(x), -1.0), Ge(re(x), 1.0)), cond)
assert reduce_rational_inequalities([[Gt(x**2, 1.0)]], x, relational=True) == \
And(Or(Lt(re(x), -1.0), Gt(re(x), 1.0)), cond)
assert reduce_rational_inequalities([[Ne(x**2, 1.0)]], x, relational=True) == \
And(Or(Lt(re(x), -1.0), And(Lt(-1.0, re(x)), Lt(re(x), 1.0)), Gt(re(x), 1.0)), cond)
def test_reduce_rational_inequalities_real_relational():
def OpenInterval(a, b):
return Interval(a, b, True, True)
def LeftOpenInterval(a, b):
return Interval(a, b, True, False)
def RightOpenInterval(a, b):
return Interval(a, b, False, True)
with assuming(Q.real(x)):
assert reduce_rational_inequalities([[(x**2 + 3*x + 2)/(x**2 - 16) >= 0]], x, relational=False) == \
Union(OpenInterval(-oo, -4), Interval(-2, -1), OpenInterval(4, oo))
assert reduce_rational_inequalities([[((-2*x - 10)*(3 - x))/((x**2 + 5)*(x - 2)**2) < 0]], x, relational=False) == \
Union(OpenInterval(-5, 2), OpenInterval(2, 3))
assert reduce_rational_inequalities([[(x + 1)/(x - 5) <= 0]], x, assume=Q.real(x), relational=False) == \
RightOpenInterval(-1, 5)
assert reduce_rational_inequalities([[(x**2 + 4*x + 3)/(x - 1) > 0]], x, assume=Q.real(x), relational=False) == \
Union(OpenInterval(-3, -1), OpenInterval(1, oo))
assert reduce_rational_inequalities([[(x**2 - 16)/(x - 1)**2 < 0]], x, assume=Q.real(x), relational=False) == \
Union(OpenInterval(-4, 1), OpenInterval(1, 4))
assert reduce_rational_inequalities([[(3*x + 1)/(x + 4) >= 1]], x, assume=Q.real(x), relational=False) == \
Union(OpenInterval(-oo, -4), RightOpenInterval(S(3)/2, oo))
assert reduce_rational_inequalities([[(x - 8)/x <= 3 - x]], x, assume=Q.real(x), relational=False) == \
Union(LeftOpenInterval(-oo, -2), LeftOpenInterval(0, 4))
def test_reduce_abs_inequalities():
real = Q.real(x)
assert reduce_inequalities(
abs(x - 5) < 3, assume=real) == And(Lt(2, x), Lt(x, 8))
assert reduce_inequalities(
abs(2*x + 3) >= 8, assume=real) == Or(Le(x, -S(11)/2), Ge(x, S(5)/2))
assert reduce_inequalities(abs(x - 4) + abs(
3*x - 5) < 7, assume=real) == And(Lt(S(1)/2, x), Lt(x, 4))
assert reduce_inequalities(abs(x - 4) + abs(3*abs(x) - 5) < 7, assume=real) == Or(And(S(-2) < x, x < -1), And(S(1)/2 < x, x < 4))
raises(NotImplementedError, lambda: reduce_inequalities(abs(x - 5) < 3))
def test_reduce_inequalities_boolean():
assert reduce_inequalities(
[Eq(x**2, 0), True]) == And(Eq(re(x), 0), Eq(im(x), 0))
assert reduce_inequalities([Eq(x**2, 0), False]) is False
def test_reduce_inequalities_assume():
assert reduce_inequalities(
[Le(x**2, 1), Q.real(x)]) == And(Le(-1, x), Le(x, 1))
assert reduce_inequalities(
[Le(x**2, 1)], Q.real(x)) == And(Le(-1, x), Le(x, 1))
def test_reduce_inequalities_multivariate():
assert reduce_inequalities([Ge(x**2, 1), Ge(y**2, 1)]) == \
And(And(Or(Le(re(x), -1), Ge(re(x), 1)), Eq(im(x), 0)),
And(Or(Le(re(y), -1), Ge(re(y), 1)), Eq(im(y), 0)))
def test_reduce_inequalities_errors():
raises(NotImplementedError, lambda: reduce_inequalities(Ge(sin(x) + x, 1)))
raises(NotImplementedError, lambda: reduce_inequalities(Ge(x**2*y + y, 1)))
raises(NotImplementedError, lambda: reduce_inequalities(Ge(sqrt(2)*x, 1)))
def test_hacky_inequalities():
assert reduce_inequalities(x + y < 1, symbols=[x]) == (x < 1 - y)
assert reduce_inequalities(x + y >= 1, symbols=[x]) == (x >= 1 - y)
def test_issue_3244():
eq = -3*x**2/2 - 45*x/4 + S(33)/2 > 0
assert reduce_inequalities(eq, Q.real(x)) == \
And(x < -S(15)/4 + sqrt(401)/4, -sqrt(401)/4 - S(15)/4 < x)
| bsd-3-clause | -448,640,723,350,479,100 | 52.339286 | 198 | 0.567877 | false |
vipshop/twemproxies | tests/test_redis/common.py | 1 | 1364 | #!/usr/bin/env python
#coding: utf-8
import os
import sys
import redis
PWD = os.path.dirname(os.path.realpath(__file__))
WORKDIR = os.path.join(PWD,'../')
sys.path.append(os.path.join(WORKDIR,'lib/'))
sys.path.append(os.path.join(WORKDIR,'conf/'))
import conf
from server_modules import *
from utils import *
CLUSTER_NAME = 'ntest'
nc_verbose = int(getenv('T_VERBOSE', 5))
mbuf = int(getenv('T_MBUF', 512))
large = int(getenv('T_LARGE', 1000))
clean = int(getenv('T_CLEAN', 1))
all_redis = [
RedisServer('127.0.0.1', 2100, '/tmp/r/redis-2100/', CLUSTER_NAME, 'redis-2100'),
RedisServer('127.0.0.1', 2101, '/tmp/r/redis-2101/', CLUSTER_NAME, 'redis-2101'),
]
nc = NutCracker('127.0.0.1', 4100, '/tmp/r/nutcracker-4100', CLUSTER_NAME,
all_redis, mbuf=mbuf, verbose=nc_verbose)
def setup():
print 'setup(mbuf=%s, verbose=%s)' %(mbuf, nc_verbose)
for r in all_redis + [nc]:
r.deploy()
r.stop()
r.start()
def teardown():
for r in all_redis + [nc]:
assert(r._alive())
r.stop()
if clean: # TODO: move clean to setup
r.clean()
default_kv = {'kkk-%s' % i : 'vvv-%s' % i for i in range(10)}
def getconn():
for r in all_redis:
c = redis.Redis(r.host(), r.port())
c.flushdb()
r = redis.Redis(nc.host(), nc.port())
return r
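# Illustrative usage from a test module (hypothetical test name):
#
#   def test_set_get():
#       r = getconn()
#       for k, v in default_kv.items():
#           r.set(k, v)
#       assert r.get('kkk-0') == 'vvv-0'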
| apache-2.0 | -4,010,993,009,019,577,000 | 23.8 | 89 | 0.58651 | false |
ric2b/Vivaldi-browser | chromium/tools/diagnosis/crbug_1001171.py | 11 | 1721 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper context wrapper for diagnosing crbug.com/1001171.
This module and all uses thereof can and should be removed once
crbug.com/1001171 has been resolved.
"""
from __future__ import print_function
import contextlib
import os
import sys
@contextlib.contextmanager
def DumpStateOnLookupError():
"""Prints potentially useful state info in the event of a LookupError."""
try:
yield
except LookupError:
print('LookupError diagnosis for crbug.com/1001171:')
for path_index, path_entry in enumerate(sys.path):
desc = 'unknown'
if not os.path.exists(path_entry):
desc = 'missing'
elif os.path.islink(path_entry):
desc = 'link -> %s' % os.path.realpath(path_entry)
elif os.path.isfile(path_entry):
desc = 'file'
elif os.path.isdir(path_entry):
desc = 'dir'
print(' sys.path[%d]: %s (%s)' % (path_index, path_entry, desc))
real_path_entry = os.path.realpath(path_entry)
if (path_entry.endswith(os.path.join('lib', 'python2.7'))
and os.path.isdir(real_path_entry)):
encodings_dir = os.path.realpath(
os.path.join(real_path_entry, 'encodings'))
if os.path.exists(encodings_dir):
if os.path.isdir(encodings_dir):
print(' %s contents: %s' % (encodings_dir,
str(os.listdir(encodings_dir))))
else:
print(' %s exists but is not a directory' % encodings_dir)
else:
print(' %s missing' % encodings_dir)
raise
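# Illustrative usage sketch -- wrap code that may raise a LookupError so the
# sys.path / encodings state is printed before the exception propagates:
#
#   with DumpStateOnLookupError():
#       codecs.lookup('utf-8')  # or any other code that may raise LookupError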
| bsd-3-clause | 7,203,210,606,618,754,000 | 32.745098 | 75 | 0.62638 | false |
kosgroup/odoo | addons/web/models/ir_http.py | 14 | 2041 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from odoo import models
from odoo.http import request
import odoo
class Http(models.AbstractModel):
_inherit = 'ir.http'
def webclient_rendering_context(self):
return {
'menu_data': request.env['ir.ui.menu'].load_menus(request.debug),
'session_info': json.dumps(self.session_info()),
}
def session_info(self):
user = request.env.user
display_switch_company_menu = user.has_group('base.group_multi_company') and len(user.company_ids) > 1
version_info = odoo.service.common.exp_version()
return {
"session_id": request.session.sid,
"uid": request.session.uid,
"is_admin": request.env.user.has_group('base.group_system'),
"is_superuser": request.env.user._is_superuser(),
"user_context": request.session.get_context() if request.session.uid else {},
"db": request.session.db,
"server_version": version_info.get('server_version'),
"server_version_info": version_info.get('server_version_info'),
"name": user.name,
"username": user.login,
"company_id": request.env.user.company_id.id if request.session.uid else None,
"partner_id": request.env.user.partner_id.id if request.session.uid and request.env.user.partner_id else None,
"user_companies": {'current_company': (user.company_id.id, user.company_id.name), 'allowed_companies': [(comp.id, comp.name) for comp in user.company_ids]} if display_switch_company_menu else False,
"currencies": self.get_currencies(),
}
def get_currencies(self):
Currency = request.env['res.currency']
currencies = Currency.search([]).read(['symbol', 'position', 'decimal_places'])
return { c['id']: {'symbol': c['symbol'], 'position': c['position'], 'digits': [69,c['decimal_places']]} for c in currencies}
| gpl-3.0 | 9,124,184,406,343,638,000 | 44.355556 | 210 | 0.624694 | false |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/matplotlib/testing/__init__.py | 10 | 3767 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from contextlib import contextmanager
from matplotlib.cbook import is_string_like, iterable
from matplotlib import rcParams, rcdefaults, use
def _is_list_like(obj):
"""Returns whether the obj is iterable and not a string"""
return not is_string_like(obj) and iterable(obj)
# stolen from pandas
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
expected warning. Pass ``False`` or ``None`` to check that it does *not*
raise a warning. Defaults to ``exception.Warning``, baseclass of all
Warnings. (basically a wrapper around ``warnings.catch_warnings``).
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
..warn:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
            # make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not _is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
assert saw_warning, ("Did not see expected warning of class %r."
% expected_warning.__name__)
assert not extra_warnings, ("Caused unexpected warning(s): %r."
% extra_warnings)
def set_font_settings_for_testing():
rcParams['font.family'] = 'DejaVu Sans'
rcParams['text.hinting'] = False
rcParams['text.hinting_factor'] = 8
def setup():
# The baseline images are created in this locale, so we should use
# it during all of the tests.
import locale
import warnings
from matplotlib.backends import backend_agg, backend_pdf, backend_svg
try:
locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
except locale.Error:
warnings.warn(
"Could not set locale to English/United States. "
"Some date-related tests may fail")
use('Agg', warn=False) # use Agg backend for these tests
# These settings *must* be hardcoded for running the comparison
# tests and are not necessarily the default values as specified in
# rcsetup.py
rcdefaults() # Start with all defaults
set_font_settings_for_testing()
| mit | 6,350,728,815,843,813,000 | 34.537736 | 78 | 0.609504 | false |
ic-labs/django-icekit | icekit/abstract_models.py | 1 | 4379 | """
Models for ``icekit`` app.
"""
# Compose concrete models from abstract models and mixins, to facilitate reuse.
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.template.loader import get_template
from django.utils import encoding, timezone
from django.utils.translation import ugettext_lazy as _
from fluent_contents.analyzer import get_template_placeholder_data
from icekit.admin_tools.filters import ChildModelFilter
from . import fields, plugins
class AbstractBaseModel(models.Model):
"""
Abstract base model.
"""
created = models.DateTimeField(
default=timezone.now, db_index=True, editable=False)
modified = models.DateTimeField(
default=timezone.now, db_index=True, editable=False)
class Meta:
abstract = True
get_latest_by = 'pk'
ordering = ('-id', )
def save(self, *args, **kwargs):
"""
Update ``self.modified``.
"""
self.modified = timezone.now()
super(AbstractBaseModel, self).save(*args, **kwargs)
@encoding.python_2_unicode_compatible
class AbstractLayout(AbstractBaseModel):
"""
An implementation of ``fluent_pages.models.db.PageLayout`` that uses
plugins to get template name choices instead of scanning a directory given
in settings.
"""
title = models.CharField(_('title'), max_length=255)
template_name = fields.TemplateNameField(
_('template'),
plugin_class=plugins.TemplateNameFieldChoicesPlugin,
unique=True,
)
content_types = models.ManyToManyField(
ContentType,
help_text='Types of content for which this layout will be allowed.',
)
class Meta:
abstract = True
ordering = ('title',)
def __str__(self):
return self.title
@classmethod
def auto_add(cls, template_name, *models, **kwargs):
"""
Get or create a layout for the given template and add content types for
the given models to it. Append the verbose name of each model to the
title with the given ``separator`` keyword argument.
"""
separator = kwargs.get('separator', ', ')
content_types = ContentType.objects.get_for_models(*models).values()
try:
# Get.
layout = cls.objects.get(template_name=template_name)
except cls.DoesNotExist:
# Create.
title = separator.join(sorted(
ct.model_class()._meta.verbose_name for ct in content_types))
layout = cls.objects.create(
template_name=template_name,
title=title,
)
layout.content_types.add(*content_types)
else:
title = [layout.title]
# Update.
for ct in content_types:
if not layout.content_types.filter(pk=ct.pk).exists():
title.append(ct.model_class()._meta.verbose_name)
layout.title = separator.join(sorted(title))
layout.save()
layout.content_types.add(*content_types)
return layout
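    # Illustrative usage sketch (not part of the original model). A concrete
    # subclass of this abstract layout could register a template for two
    # hypothetical page models; the template path and model names below are
    # placeholders:
    #
    #     Layout.auto_add('layouts/default.html', ArticlePage, EventPage)
    #
    # Calling it again with additional models extends the existing layout's
    # content types and title rather than creating a duplicate layout.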
def get_placeholder_data(self):
"""
Return placeholder data for this layout's template.
"""
return get_template_placeholder_data(self.get_template())
def get_template(self):
"""
Return the template to render this layout.
"""
return get_template(self.template_name)
@encoding.python_2_unicode_compatible
class AbstractMediaCategory(AbstractBaseModel):
"""
A categorisation model for Media assets.
"""
name = models.CharField(
max_length=255,
unique=True,
)
class Meta:
abstract = True
def __str__(self):
return self.name
class BoostedTermsMixin(models.Model):
"""
Mixin for providing a field for terms which will get boosted search
priority.
"""
boosted_terms = models.TextField(
blank=True,
default='', # This is for convenience when adding models in the shell.
help_text=_(
'Words (space separated) added here are boosted in relevance for search results '
'increasing the chance of this appearing higher in the search results.'
),
verbose_name=_('Boosted Search Terms'),
)
class Meta:
abstract = True
| mit | -4,701,846,451,494,734,000 | 29.622378 | 93 | 0.62343 | false |
plotly/plotly.py | packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/_tickfont.py | 2 | 1580 | import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name="tickfont",
parent_name="histogram2dcontour.colorbar",
**kwargs
):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
| mit | 3,341,459,364,958,832,600 | 36.619048 | 72 | 0.536076 | false |
donspaulding/adspygoogle | examples/adspygoogle/dfp/v201206/update_creatives.py | 2 | 2356 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the destination URL of all image creatives up to
the first 500. To determine which image creatives exist, run
get_all_creatives.py."""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
creative_service = client.GetService('CreativeService', version='v201206')
# Create statement object to get all image creatives.
values = [{
'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'ImageCreative'
}
}]
filter_statement = {'query': 'WHERE creativeType = :type LIMIT 500',
'values': values}
# Get creatives by statement.
response = creative_service.GetCreativesByStatement(filter_statement)[0]
creatives = []
if 'results' in response:
creatives = response['results']
if creatives:
# Update each local creative object by changing its destination URL.
for creative in creatives:
creative['destinationUrl'] = 'http://news.google.com'
# Update creatives remotely.
creatives = creative_service.UpdateCreatives(creatives)
# Display results.
if creatives:
for creative in creatives:
print ('Image creative with id \'%s\' and destination URL \'%s\' was '
'updated.' % (creative['id'], creative['destinationUrl']))
else:
    print 'No creatives were updated.'
else:
  print 'No creatives found to update.'
| apache-2.0 | 9,132,696,863,083,803,000 | 31.722222 | 80 | 0.703735 | false |
proxysh/Safejumper-for-Mac | buildlinux/env32/lib/python2.7/site-packages/twisted/mail/test/test_pop3.py | 10 | 30479 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.mail.pop3} module.
"""
from __future__ import print_function
import StringIO
import hmac
import base64
import itertools
from collections import OrderedDict
from zope.interface import implementer
from twisted.internet import defer
from twisted.trial import unittest, util
from twisted import mail
import twisted.mail.protocols
import twisted.mail.pop3
import twisted.internet.protocol
from twisted import internet
from twisted.mail import pop3
from twisted.protocols import loopback
from twisted.python import failure
from twisted import cred
import twisted.cred.portal
import twisted.cred.checkers
import twisted.cred.credentials
from twisted.test.proto_helpers import LineSendingProtocol
class UtilityTests(unittest.TestCase):
"""
Test the various helper functions and classes used by the POP3 server
protocol implementation.
"""
def testLineBuffering(self):
"""
Test creating a LineBuffer and feeding it some lines. The lines should
build up in its internal buffer for a while and then get spat out to
the writer.
"""
output = []
input = iter(itertools.cycle(['012', '345', '6', '7', '8', '9']))
c = pop3._IteratorBuffer(output.extend, input, 6)
i = iter(c)
        self.assertEqual(output, []) # nothing is buffered
i.next()
self.assertEqual(output, []) # '012' is buffered
i.next()
self.assertEqual(output, []) # '012345' is buffered
i.next()
self.assertEqual(output, ['012', '345', '6']) # nothing is buffered
for n in range(5):
i.next()
self.assertEqual(output, ['012', '345', '6', '7', '8', '9', '012', '345'])
def testFinishLineBuffering(self):
"""
Test that a LineBuffer flushes everything when its iterator is
exhausted, and itself raises StopIteration.
"""
output = []
input = iter(['a', 'b', 'c'])
c = pop3._IteratorBuffer(output.extend, input, 5)
for i in c:
pass
self.assertEqual(output, ['a', 'b', 'c'])
def testSuccessResponseFormatter(self):
"""
Test that the thing that spits out POP3 'success responses' works
right.
"""
self.assertEqual(
pop3.successResponse('Great.'),
'+OK Great.\r\n')
def testStatLineFormatter(self):
"""
Test that the function which formats stat lines does so appropriately.
"""
statLine = list(pop3.formatStatResponse([]))[-1]
self.assertEqual(statLine, '+OK 0 0\r\n')
statLine = list(pop3.formatStatResponse([10, 31, 0, 10101]))[-1]
self.assertEqual(statLine, '+OK 4 10142\r\n')
def testListLineFormatter(self):
"""
Test that the function which formats the lines in response to a LIST
command does so appropriately.
"""
listLines = list(pop3.formatListResponse([]))
self.assertEqual(
listLines,
['+OK 0\r\n', '.\r\n'])
listLines = list(pop3.formatListResponse([1, 2, 3, 100]))
self.assertEqual(
listLines,
['+OK 4\r\n', '1 1\r\n', '2 2\r\n', '3 3\r\n', '4 100\r\n', '.\r\n'])
def testUIDListLineFormatter(self):
"""
Test that the function which formats lines in response to a UIDL
command does so appropriately.
"""
UIDs = ['abc', 'def', 'ghi']
listLines = list(pop3.formatUIDListResponse([], UIDs.__getitem__))
self.assertEqual(
listLines,
['+OK \r\n', '.\r\n'])
listLines = list(pop3.formatUIDListResponse([123, 431, 591], UIDs.__getitem__))
self.assertEqual(
listLines,
['+OK \r\n', '1 abc\r\n', '2 def\r\n', '3 ghi\r\n', '.\r\n'])
listLines = list(pop3.formatUIDListResponse([0, None, 591], UIDs.__getitem__))
self.assertEqual(
listLines,
['+OK \r\n', '1 abc\r\n', '3 ghi\r\n', '.\r\n'])
class MyVirtualPOP3(mail.protocols.VirtualPOP3):
magic = '<moshez>'
def authenticateUserAPOP(self, user, digest):
user, domain = self.lookupDomain(user)
return self.service.domains['baz.com'].authenticateUserAPOP(user, digest, self.magic, domain)
class DummyDomain:
def __init__(self):
self.users = {}
def addUser(self, name):
self.users[name] = []
def addMessage(self, name, message):
self.users[name].append(message)
def authenticateUserAPOP(self, name, digest, magic, domain):
return pop3.IMailbox, ListMailbox(self.users[name]), lambda: None
class ListMailbox:
def __init__(self, list):
self.list = list
def listMessages(self, i=None):
if i is None:
return map(len, self.list)
return len(self.list[i])
def getMessage(self, i):
return StringIO.StringIO(self.list[i])
def getUidl(self, i):
return i
def deleteMessage(self, i):
self.list[i] = ''
def sync(self):
pass
class MyPOP3Downloader(pop3.POP3Client):
def handle_WELCOME(self, line):
pop3.POP3Client.handle_WELCOME(self, line)
self.apop('[email protected]', 'world')
def handle_APOP(self, line):
parts = line.split()
code = parts[0]
if code != '+OK':
raise AssertionError('code is: %s , parts is: %s ' % (code, parts))
self.lines = []
self.retr(1)
def handle_RETR_continue(self, line):
self.lines.append(line)
def handle_RETR_end(self):
self.message = '\n'.join(self.lines) + '\n'
self.quit()
def handle_QUIT(self, line):
if line[:3] != '+OK':
raise AssertionError('code is ' + line)
class POP3Tests(unittest.TestCase):
message = '''\
Subject: urgent
Someone set up us the bomb!
'''
expectedOutput = '''\
+OK <moshez>\015
+OK Authentication succeeded\015
+OK \015
1 0\015
.\015
+OK %d\015
Subject: urgent\015
\015
Someone set up us the bomb!\015
.\015
+OK \015
''' % len(message)
def setUp(self):
self.factory = internet.protocol.Factory()
self.factory.domains = {}
self.factory.domains['baz.com'] = DummyDomain()
self.factory.domains['baz.com'].addUser('hello')
self.factory.domains['baz.com'].addMessage('hello', self.message)
def testMessages(self):
client = LineSendingProtocol([
'APOP [email protected] world',
'UIDL',
'RETR 1',
'QUIT',
])
server = MyVirtualPOP3()
server.service = self.factory
def check(ignored):
output = '\r\n'.join(client.response) + '\r\n'
self.assertEqual(output, self.expectedOutput)
return loopback.loopbackTCP(server, client).addCallback(check)
def testLoopback(self):
protocol = MyVirtualPOP3()
protocol.service = self.factory
clientProtocol = MyPOP3Downloader()
def check(ignored):
self.assertEqual(clientProtocol.message, self.message)
protocol.connectionLost(
failure.Failure(Exception("Test harness disconnect")))
d = loopback.loopbackAsync(protocol, clientProtocol)
return d.addCallback(check)
testLoopback.suppress = [util.suppress(message="twisted.mail.pop3.POP3Client is deprecated")]
class DummyPOP3(pop3.POP3):
magic = '<moshez>'
def authenticateUserAPOP(self, user, password):
return pop3.IMailbox, DummyMailbox(ValueError), lambda: None
class DummyMailbox(pop3.Mailbox):
messages = ['From: moshe\nTo: moshe\n\nHow are you, friend?\n']
def __init__(self, exceptionType):
self.messages = DummyMailbox.messages[:]
self.exceptionType = exceptionType
def listMessages(self, i=None):
if i is None:
return map(len, self.messages)
if i >= len(self.messages):
raise self.exceptionType()
return len(self.messages[i])
def getMessage(self, i):
return StringIO.StringIO(self.messages[i])
def getUidl(self, i):
if i >= len(self.messages):
raise self.exceptionType()
return str(i)
def deleteMessage(self, i):
self.messages[i] = ''
class AnotherPOP3Tests(unittest.TestCase):
def runTest(self, lines, expectedOutput):
dummy = DummyPOP3()
client = LineSendingProtocol(lines)
d = loopback.loopbackAsync(dummy, client)
return d.addCallback(self._cbRunTest, client, dummy, expectedOutput)
def _cbRunTest(self, ignored, client, dummy, expectedOutput):
self.assertEqual('\r\n'.join(expectedOutput),
'\r\n'.join(client.response))
dummy.connectionLost(failure.Failure(Exception("Test harness disconnect")))
return ignored
def test_buffer(self):
"""
Test a lot of different POP3 commands in an extremely pipelined
scenario.
This test may cover legitimate behavior, but the intent and
granularity are not very good. It would likely be an improvement to
split it into a number of smaller, more focused tests.
"""
return self.runTest(
["APOP moshez dummy",
"LIST",
"UIDL",
"RETR 1",
"RETR 2",
"DELE 1",
"RETR 1",
"QUIT"],
['+OK <moshez>',
'+OK Authentication succeeded',
'+OK 1',
'1 44',
'.',
'+OK ',
'1 0',
'.',
'+OK 44',
'From: moshe',
'To: moshe',
'',
'How are you, friend?',
'.',
'-ERR Bad message number argument',
'+OK ',
'-ERR message deleted',
'+OK '])
def test_noop(self):
"""
Test the no-op command.
"""
return self.runTest(
['APOP spiv dummy',
'NOOP',
'QUIT'],
['+OK <moshez>',
'+OK Authentication succeeded',
'+OK ',
'+OK '])
def testAuthListing(self):
p = DummyPOP3()
p.factory = internet.protocol.Factory()
p.factory.challengers = {'Auth1': None, 'secondAuth': None, 'authLast': None}
client = LineSendingProtocol([
"AUTH",
"QUIT",
])
d = loopback.loopbackAsync(p, client)
return d.addCallback(self._cbTestAuthListing, client)
def _cbTestAuthListing(self, ignored, client):
self.assertTrue(client.response[1].startswith('+OK'))
self.assertEqual(sorted(client.response[2:5]),
["AUTH1", "AUTHLAST", "SECONDAUTH"])
self.assertEqual(client.response[5], ".")
def testIllegalPASS(self):
dummy = DummyPOP3()
client = LineSendingProtocol([
"PASS fooz",
"QUIT"
])
d = loopback.loopbackAsync(dummy, client)
return d.addCallback(self._cbTestIllegalPASS, client, dummy)
def _cbTestIllegalPASS(self, ignored, client, dummy):
expected_output = '+OK <moshez>\r\n-ERR USER required before PASS\r\n+OK \r\n'
self.assertEqual(expected_output, '\r\n'.join(client.response) + '\r\n')
dummy.connectionLost(failure.Failure(Exception("Test harness disconnect")))
def testEmptyPASS(self):
dummy = DummyPOP3()
client = LineSendingProtocol([
"PASS ",
"QUIT"
])
d = loopback.loopbackAsync(dummy, client)
return d.addCallback(self._cbTestEmptyPASS, client, dummy)
def _cbTestEmptyPASS(self, ignored, client, dummy):
expected_output = '+OK <moshez>\r\n-ERR USER required before PASS\r\n+OK \r\n'
self.assertEqual(expected_output, '\r\n'.join(client.response) + '\r\n')
dummy.connectionLost(failure.Failure(Exception("Test harness disconnect")))
@implementer(pop3.IServerFactory)
class TestServerFactory:
def cap_IMPLEMENTATION(self):
return "Test Implementation String"
def cap_EXPIRE(self):
return 60
challengers = OrderedDict([("SCHEME_1", None), ("SCHEME_2", None)])
def cap_LOGIN_DELAY(self):
return 120
pue = True
def perUserExpiration(self):
return self.pue
puld = True
def perUserLoginDelay(self):
return self.puld
class TestMailbox:
loginDelay = 100
messageExpiration = 25
class CapabilityTests(unittest.TestCase):
def setUp(self):
s = StringIO.StringIO()
p = pop3.POP3()
p.factory = TestServerFactory()
p.transport = internet.protocol.FileWrapper(s)
p.connectionMade()
p.do_CAPA()
self.caps = p.listCapabilities()
self.pcaps = s.getvalue().splitlines()
s = StringIO.StringIO()
p.mbox = TestMailbox()
p.transport = internet.protocol.FileWrapper(s)
p.do_CAPA()
self.lpcaps = s.getvalue().splitlines()
p.connectionLost(failure.Failure(Exception("Test harness disconnect")))
def contained(self, s, *caps):
for c in caps:
self.assertIn(s, c)
def testUIDL(self):
self.contained("UIDL", self.caps, self.pcaps, self.lpcaps)
def testTOP(self):
self.contained("TOP", self.caps, self.pcaps, self.lpcaps)
def testUSER(self):
self.contained("USER", self.caps, self.pcaps, self.lpcaps)
def testEXPIRE(self):
self.contained("EXPIRE 60 USER", self.caps, self.pcaps)
self.contained("EXPIRE 25", self.lpcaps)
def testIMPLEMENTATION(self):
self.contained(
"IMPLEMENTATION Test Implementation String",
self.caps, self.pcaps, self.lpcaps
)
def testSASL(self):
self.contained(
"SASL SCHEME_1 SCHEME_2",
self.caps, self.pcaps, self.lpcaps
)
def testLOGIN_DELAY(self):
self.contained("LOGIN-DELAY 120 USER", self.caps, self.pcaps)
self.assertIn("LOGIN-DELAY 100", self.lpcaps)
class GlobalCapabilitiesTests(unittest.TestCase):
def setUp(self):
s = StringIO.StringIO()
p = pop3.POP3()
p.factory = TestServerFactory()
p.factory.pue = p.factory.puld = False
p.transport = internet.protocol.FileWrapper(s)
p.connectionMade()
p.do_CAPA()
self.caps = p.listCapabilities()
self.pcaps = s.getvalue().splitlines()
s = StringIO.StringIO()
p.mbox = TestMailbox()
p.transport = internet.protocol.FileWrapper(s)
p.do_CAPA()
self.lpcaps = s.getvalue().splitlines()
p.connectionLost(failure.Failure(Exception("Test harness disconnect")))
def contained(self, s, *caps):
for c in caps:
self.assertIn(s, c)
def testEXPIRE(self):
self.contained("EXPIRE 60", self.caps, self.pcaps, self.lpcaps)
def testLOGIN_DELAY(self):
self.contained("LOGIN-DELAY 120", self.caps, self.pcaps, self.lpcaps)
class TestRealm:
def requestAvatar(self, avatarId, mind, *interfaces):
if avatarId == 'testuser':
return pop3.IMailbox, DummyMailbox(ValueError), lambda: None
assert False
class SASLTests(unittest.TestCase):
def testValidLogin(self):
p = pop3.POP3()
p.factory = TestServerFactory()
p.factory.challengers = {'CRAM-MD5': cred.credentials.CramMD5Credentials}
p.portal = cred.portal.Portal(TestRealm())
ch = cred.checkers.InMemoryUsernamePasswordDatabaseDontUse()
ch.addUser('testuser', 'testpassword')
p.portal.registerChecker(ch)
s = StringIO.StringIO()
p.transport = internet.protocol.FileWrapper(s)
p.connectionMade()
p.lineReceived("CAPA")
self.assertTrue(s.getvalue().find("SASL CRAM-MD5") >= 0)
p.lineReceived("AUTH CRAM-MD5")
chal = s.getvalue().splitlines()[-1][2:]
chal = base64.decodestring(chal)
response = hmac.HMAC('testpassword', chal).hexdigest()
p.lineReceived(base64.encodestring('testuser ' + response).rstrip('\n'))
self.assertTrue(p.mbox)
self.assertTrue(s.getvalue().splitlines()[-1].find("+OK") >= 0)
p.connectionLost(failure.Failure(Exception("Test harness disconnect")))
class CommandMixin:
"""
Tests for all the commands a POP3 server is allowed to receive.
"""
extraMessage = '''\
From: guy
To: fellow
More message text for you.
'''
def setUp(self):
"""
Make a POP3 server protocol instance hooked up to a simple mailbox and
a transport that buffers output to a StringIO.
"""
p = pop3.POP3()
p.mbox = self.mailboxType(self.exceptionType)
p.schedule = list
self.pop3Server = p
s = StringIO.StringIO()
p.transport = internet.protocol.FileWrapper(s)
p.connectionMade()
s.truncate(0)
self.pop3Transport = s
def tearDown(self):
"""
Disconnect the server protocol so it can clean up anything it might
need to clean up.
"""
self.pop3Server.connectionLost(failure.Failure(Exception("Test harness disconnect")))
def _flush(self):
"""
Do some of the things that the reactor would take care of, if the
reactor were actually running.
"""
# Oh man FileWrapper is pooh.
self.pop3Server.transport._checkProducer()
def testLIST(self):
"""
Test the two forms of list: with a message index number, which should
return a short-form response, and without a message index number, which
should return a long-form response, one line per message.
"""
p = self.pop3Server
s = self.pop3Transport
p.lineReceived("LIST 1")
self._flush()
self.assertEqual(s.getvalue(), "+OK 1 44\r\n")
s.truncate(0)
p.lineReceived("LIST")
self._flush()
self.assertEqual(s.getvalue(), "+OK 1\r\n1 44\r\n.\r\n")
def testLISTWithBadArgument(self):
"""
Test that non-integers and out-of-bound integers produce appropriate
error responses.
"""
p = self.pop3Server
s = self.pop3Transport
p.lineReceived("LIST a")
self.assertEqual(
s.getvalue(),
"-ERR Invalid message-number: 'a'\r\n")
s.truncate(0)
p.lineReceived("LIST 0")
self.assertEqual(
s.getvalue(),
"-ERR Invalid message-number: 0\r\n")
s.truncate(0)
p.lineReceived("LIST 2")
self.assertEqual(
s.getvalue(),
"-ERR Invalid message-number: 2\r\n")
s.truncate(0)
def testUIDL(self):
"""
Test the two forms of the UIDL command. These are just like the two
forms of the LIST command.
"""
p = self.pop3Server
s = self.pop3Transport
p.lineReceived("UIDL 1")
self.assertEqual(s.getvalue(), "+OK 0\r\n")
s.truncate(0)
p.lineReceived("UIDL")
self._flush()
self.assertEqual(s.getvalue(), "+OK \r\n1 0\r\n.\r\n")
def testUIDLWithBadArgument(self):
"""
Test that UIDL with a non-integer or an out-of-bounds integer produces
the appropriate error response.
"""
p = self.pop3Server
s = self.pop3Transport
p.lineReceived("UIDL a")
self.assertEqual(
s.getvalue(),
"-ERR Bad message number argument\r\n")
s.truncate(0)
p.lineReceived("UIDL 0")
self.assertEqual(
s.getvalue(),
"-ERR Bad message number argument\r\n")
s.truncate(0)
p.lineReceived("UIDL 2")
self.assertEqual(
s.getvalue(),
"-ERR Bad message number argument\r\n")
s.truncate(0)
def testSTAT(self):
"""
Test the single form of the STAT command, which returns a short-form
response of the number of messages in the mailbox and their total size.
"""
p = self.pop3Server
s = self.pop3Transport
p.lineReceived("STAT")
self._flush()
self.assertEqual(s.getvalue(), "+OK 1 44\r\n")
def testRETR(self):
"""
Test downloading a message.
"""
p = self.pop3Server
s = self.pop3Transport
p.lineReceived("RETR 1")
self._flush()
self.assertEqual(
s.getvalue(),
"+OK 44\r\n"
"From: moshe\r\n"
"To: moshe\r\n"
"\r\n"
"How are you, friend?\r\n"
".\r\n")
s.truncate(0)
def testRETRWithBadArgument(self):
"""
Test that trying to download a message with a bad argument, either not
an integer or an out-of-bounds integer, fails with the appropriate
error response.
"""
p = self.pop3Server
s = self.pop3Transport
p.lineReceived("RETR a")
self.assertEqual(
s.getvalue(),
"-ERR Bad message number argument\r\n")
s.truncate(0)
p.lineReceived("RETR 0")
self.assertEqual(
s.getvalue(),
"-ERR Bad message number argument\r\n")
s.truncate(0)
p.lineReceived("RETR 2")
self.assertEqual(
s.getvalue(),
"-ERR Bad message number argument\r\n")
s.truncate(0)
def testTOP(self):
"""
Test downloading the headers and part of the body of a message.
"""
p = self.pop3Server
s = self.pop3Transport
p.mbox.messages.append(self.extraMessage)
p.lineReceived("TOP 1 0")
self._flush()
self.assertEqual(
s.getvalue(),
"+OK Top of message follows\r\n"
"From: moshe\r\n"
"To: moshe\r\n"
"\r\n"
".\r\n")
def testTOPWithBadArgument(self):
"""
Test that trying to download a message with a bad argument, either a
message number which isn't an integer or is an out-of-bounds integer or
a number of lines which isn't an integer or is a negative integer,
fails with the appropriate error response.
"""
p = self.pop3Server
s = self.pop3Transport
p.mbox.messages.append(self.extraMessage)
p.lineReceived("TOP 1 a")
self.assertEqual(
s.getvalue(),
"-ERR Bad line count argument\r\n")
s.truncate(0)
p.lineReceived("TOP 1 -1")
self.assertEqual(
s.getvalue(),
"-ERR Bad line count argument\r\n")
s.truncate(0)
p.lineReceived("TOP a 1")
self.assertEqual(
s.getvalue(),
"-ERR Bad message number argument\r\n")
s.truncate(0)
p.lineReceived("TOP 0 1")
self.assertEqual(
s.getvalue(),
"-ERR Bad message number argument\r\n")
s.truncate(0)
p.lineReceived("TOP 3 1")
self.assertEqual(
s.getvalue(),
"-ERR Bad message number argument\r\n")
s.truncate(0)
def testLAST(self):
"""
Test the exceedingly pointless LAST command, which tells you the
highest message index which you have already downloaded.
"""
p = self.pop3Server
s = self.pop3Transport
p.mbox.messages.append(self.extraMessage)
p.lineReceived('LAST')
self.assertEqual(
s.getvalue(),
"+OK 0\r\n")
s.truncate(0)
def testRetrieveUpdatesHighest(self):
"""
Test that issuing a RETR command updates the LAST response.
"""
p = self.pop3Server
s = self.pop3Transport
p.mbox.messages.append(self.extraMessage)
p.lineReceived('RETR 2')
self._flush()
s.truncate(0)
p.lineReceived('LAST')
self.assertEqual(
s.getvalue(),
'+OK 2\r\n')
s.truncate(0)
def testTopUpdatesHighest(self):
"""
Test that issuing a TOP command updates the LAST response.
"""
p = self.pop3Server
s = self.pop3Transport
p.mbox.messages.append(self.extraMessage)
p.lineReceived('TOP 2 10')
self._flush()
s.truncate(0)
p.lineReceived('LAST')
self.assertEqual(
s.getvalue(),
'+OK 2\r\n')
def testHighestOnlyProgresses(self):
"""
Test that downloading a message with a smaller index than the current
LAST response doesn't change the LAST response.
"""
p = self.pop3Server
s = self.pop3Transport
p.mbox.messages.append(self.extraMessage)
p.lineReceived('RETR 2')
self._flush()
p.lineReceived('TOP 1 10')
self._flush()
s.truncate(0)
p.lineReceived('LAST')
self.assertEqual(
s.getvalue(),
'+OK 2\r\n')
def testResetClearsHighest(self):
"""
Test that issuing RSET changes the LAST response to 0.
"""
p = self.pop3Server
s = self.pop3Transport
p.mbox.messages.append(self.extraMessage)
p.lineReceived('RETR 2')
self._flush()
p.lineReceived('RSET')
s.truncate(0)
p.lineReceived('LAST')
self.assertEqual(
s.getvalue(),
'+OK 0\r\n')
_listMessageDeprecation = (
"twisted.mail.pop3.IMailbox.listMessages may not "
"raise IndexError for out-of-bounds message numbers: "
"raise ValueError instead.")
_listMessageSuppression = util.suppress(
message=_listMessageDeprecation,
category=PendingDeprecationWarning)
_getUidlDeprecation = (
"twisted.mail.pop3.IMailbox.getUidl may not "
"raise IndexError for out-of-bounds message numbers: "
"raise ValueError instead.")
_getUidlSuppression = util.suppress(
message=_getUidlDeprecation,
category=PendingDeprecationWarning)
class IndexErrorCommandTests(CommandMixin, unittest.TestCase):
"""
Run all of the command tests against a mailbox which raises IndexError
when an out of bounds request is made. This behavior will be deprecated
shortly and then removed.
"""
exceptionType = IndexError
mailboxType = DummyMailbox
def testLISTWithBadArgument(self):
return CommandMixin.testLISTWithBadArgument(self)
testLISTWithBadArgument.suppress = [_listMessageSuppression]
def testUIDLWithBadArgument(self):
return CommandMixin.testUIDLWithBadArgument(self)
testUIDLWithBadArgument.suppress = [_getUidlSuppression]
def testTOPWithBadArgument(self):
return CommandMixin.testTOPWithBadArgument(self)
testTOPWithBadArgument.suppress = [_listMessageSuppression]
def testRETRWithBadArgument(self):
return CommandMixin.testRETRWithBadArgument(self)
testRETRWithBadArgument.suppress = [_listMessageSuppression]
class ValueErrorCommandTests(CommandMixin, unittest.TestCase):
"""
Run all of the command tests against a mailbox which raises ValueError
when an out of bounds request is made. This is the correct behavior and
after support for mailboxes which raise IndexError is removed, this will
become just C{CommandTestCase}.
"""
exceptionType = ValueError
mailboxType = DummyMailbox
class SyncDeferredMailbox(DummyMailbox):
"""
Mailbox which has a listMessages implementation which returns a Deferred
which has already fired.
"""
def listMessages(self, n=None):
return defer.succeed(DummyMailbox.listMessages(self, n))
class IndexErrorSyncDeferredCommandTests(IndexErrorCommandTests):
"""
Run all of the L{IndexErrorCommandTests} tests with a
synchronous-Deferred returning IMailbox implementation.
"""
mailboxType = SyncDeferredMailbox
class ValueErrorSyncDeferredCommandTests(ValueErrorCommandTests):
"""
Run all of the L{ValueErrorCommandTests} tests with a
synchronous-Deferred returning IMailbox implementation.
"""
mailboxType = SyncDeferredMailbox
class AsyncDeferredMailbox(DummyMailbox):
"""
Mailbox which has a listMessages implementation which returns a Deferred
which has not yet fired.
"""
def __init__(self, *a, **kw):
self.waiting = []
DummyMailbox.__init__(self, *a, **kw)
def listMessages(self, n=None):
d = defer.Deferred()
# See AsyncDeferredMailbox._flush
self.waiting.append((d, DummyMailbox.listMessages(self, n)))
return d
class IndexErrorAsyncDeferredCommandTests(IndexErrorCommandTests):
"""
Run all of the L{IndexErrorCommandTests} tests with an asynchronous-Deferred
returning IMailbox implementation.
"""
mailboxType = AsyncDeferredMailbox
def _flush(self):
"""
Fire whatever Deferreds we've built up in our mailbox.
"""
while self.pop3Server.mbox.waiting:
d, a = self.pop3Server.mbox.waiting.pop()
d.callback(a)
IndexErrorCommandTests._flush(self)
class ValueErrorAsyncDeferredCommandTests(ValueErrorCommandTests):
"""
    Run all of the L{ValueErrorCommandTests} tests with an asynchronous-Deferred
returning IMailbox implementation.
"""
mailboxType = AsyncDeferredMailbox
def _flush(self):
"""
Fire whatever Deferreds we've built up in our mailbox.
"""
while self.pop3Server.mbox.waiting:
d, a = self.pop3Server.mbox.waiting.pop()
d.callback(a)
ValueErrorCommandTests._flush(self)
class POP3MiscTests(unittest.TestCase):
"""
Miscellaneous tests more to do with module/package structure than
anything to do with the Post Office Protocol.
"""
def test_all(self):
"""
This test checks that all names listed in
twisted.mail.pop3.__all__ are actually present in the module.
"""
mod = twisted.mail.pop3
for attr in mod.__all__:
self.assertTrue(hasattr(mod, attr))
| gpl-2.0 | -2,474,074,347,284,726,000 | 27.45845 | 101 | 0.596443 | false |
TrampolineRTOS/trampoline | machines/msp430x/small/msp430fr5994/launchpad/serial/serial.py | 2 | 2173 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
from math import *
# This simple Python script computes baud-rate settings
# for different input frequencies.
# The algorithm is extracted from the user's guide
# (slau367o.pdf), p.776.
# The two tables fracFreq and ucbrsTab are extracted from
# table 30-4 of this user's guide.
# The DCOfreq table gives the frequency of the DCO for each
# configuration (dcofsel[3..1] | dcorsel).
# The algorithm is the one in "Baud-rate settings quick set up".
fracFreq = [
0.0000, 0.0529, 0.0715, 0.0835,
0.1001, 0.1252, 0.1430, 0.1670,
0.2147, 0.2224, 0.2503, 0.3000,
0.3335, 0.3575, 0.3753, 0.4003,
0.4286, 0.4378, 0.5002, 0.5715,
0.6003, 0.6254, 0.6432, 0.6667,
0.7001, 0.7147, 0.7503, 0.7861,
0.8004, 0.8333, 0.8464, 0.8572,
0.8751, 0.9004, 0.9170, 0.9288]
ucbrsTab = [
0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x11,
0x21, 0x22, 0x44, 0x25, 0x49, 0x4A, 0x52, 0x92,
0x53, 0x55, 0xAB, 0x6B, 0xAD, 0xB5, 0xB6, 0xD6,
0xB7, 0xBB, 0xDD, 0xED, 0xEE, 0xBF, 0xDF, 0xEF,
0xF7, 0xFB, 0xFD, 0xFE]
DCOfreq = [ 1000000,  1000000,  2670000,  5330000,
            3500000,  7000000,  4000000,  8000000,
            5330000, 16000000,  7000000, 21000000,
            8000000, 24000000,        0,        0]
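# Sketch of the assumed index layout (derived from the comment above): entry
# i corresponds to (dcofsel << 1) | dcorsel, so e.g. dcofsel=3, dcorsel=1
# selects index 7, i.e. 8 MHz. The trailing zeros mark unused configurations.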
def exportTab(valList,name):
print('const uint16_t '+name+'[] = {',end='')
i = 0;
for val in valList:
if i != 0:
print(', ',end='')
if i%8 == 0:
print('\n\t',end='')
print(hex(val),end='')
i = i+1
print('};')
uartFreq = 9600
brw = []
mctlw = []
for freq in DCOfreq:
ucbr = 0
ucos16 = 0
ucbrf = 0
ucbrs = 0
if freq != 0:
N = (float)(freq)/uartFreq
if N > 16:
ucos16 = 1
ucbr = (int)(N/16)
ucbrf = (int)(((N/16)-floor(N/16))*16)
else:
ucbr = (int)(N)
frac = N-floor(N);
i = 1
while frac > fracFreq[i]:
i = i+1
i = i-1
ucbrs = ucbrsTab[i]
#regs
brw.append(ucbr)
mctlw.append(ucbrs << 8 | ucbrf << 4 | ucos16)
exportTab(brw, 'tpl_brwTab')
exportTab(mctlw, 'tpl_mctlwTab')
| gpl-2.0 | 1,583,848,727,349,696,800 | 25.82716 | 63 | 0.570179 | false |
googleapis/googleapis-gen | google/cloud/bigquery/connection/v1/bigquery-connection-v1-py/google/cloud/bigquery_connection_v1/services/connection_service/client.py | 1 | 49605 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.bigquery_connection_v1.services.connection_service import pagers
from google.cloud.bigquery_connection_v1.types import connection
from google.cloud.bigquery_connection_v1.types import connection as gcbc_connection
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import ConnectionServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ConnectionServiceGrpcTransport
from .transports.grpc_asyncio import ConnectionServiceGrpcAsyncIOTransport
class ConnectionServiceClientMeta(type):
"""Metaclass for the ConnectionService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[ConnectionServiceTransport]]
_transport_registry["grpc"] = ConnectionServiceGrpcTransport
_transport_registry["grpc_asyncio"] = ConnectionServiceGrpcAsyncIOTransport
def get_transport_class(cls,
label: str = None,
) -> Type[ConnectionServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ConnectionServiceClient(metaclass=ConnectionServiceClientMeta):
"""Manages external data source connections and credentials."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
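    # Example of the conversion performed above (sketch, follows directly from
    # the regex match and replace calls): "bigqueryconnection.googleapis.com"
    # becomes "bigqueryconnection.mtls.googleapis.com", while an endpoint that
    # already contains ".mtls." or is not a googleapis.com host is returned
    # unchanged.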
DEFAULT_ENDPOINT = "bigqueryconnection.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConnectionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConnectionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ConnectionServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ConnectionServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def connection_path(project: str,location: str,connection: str,) -> str:
"""Returns a fully-qualified connection string."""
return "projects/{project}/locations/{location}/connections/{connection}".format(project=project, location=location, connection=connection, )
@staticmethod
def parse_connection_path(path: str) -> Dict[str,str]:
"""Parses a connection path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/connections/(?P<connection>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ConnectionServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the connection service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ConnectionServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ConnectionServiceTransport):
# transport is a ConnectionServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError("When providing a transport instance, "
"provide its credentials directly.")
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
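    # Illustrative construction sketch (not part of the generated client). The
    # key file path is a hypothetical placeholder; with no arguments the client
    # falls back to the ambient default credentials:
    #
    #     client = ConnectionServiceClient.from_service_account_file(
    #         "/path/to/key.json")
    #     # or simply: client = ConnectionServiceClient()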
def create_connection(self,
request: gcbc_connection.CreateConnectionRequest = None,
*,
parent: str = None,
connection: gcbc_connection.Connection = None,
connection_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcbc_connection.Connection:
r"""Creates a new connection.
Args:
request (google.cloud.bigquery_connection_v1.types.CreateConnectionRequest):
The request object. The request for
[ConnectionService.CreateConnection][google.cloud.bigquery.connection.v1.ConnectionService.CreateConnection].
parent (str):
Required. Parent resource name. Must be in the format
``projects/{project_id}/locations/{location_id}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
connection (google.cloud.bigquery_connection_v1.types.Connection):
Required. Connection to create.
This corresponds to the ``connection`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
connection_id (str):
Optional. Connection id that should
be assigned to the created connection.
This corresponds to the ``connection_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_connection_v1.types.Connection:
Configuration parameters to establish
connection with an external data source,
except the credential attributes.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, connection, connection_id])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a gcbc_connection.CreateConnectionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcbc_connection.CreateConnectionRequest):
request = gcbc_connection.CreateConnectionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if connection is not None:
request.connection = connection
if connection_id is not None:
request.connection_id = connection_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_connection]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
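    # Hedged usage sketch: the flattened arguments above are folded into a
    # CreateConnectionRequest; the project, location and connection id below
    # are placeholders:
    #
    #     created = client.create_connection(
    #         parent="projects/my-project/locations/us",
    #         connection=gcbc_connection.Connection(friendly_name="my conn"),
    #         connection_id="my_connection_id",
    #     )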
def get_connection(self,
request: connection.GetConnectionRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> connection.Connection:
r"""Returns specified connection.
Args:
request (google.cloud.bigquery_connection_v1.types.GetConnectionRequest):
The request object. The request for
[ConnectionService.GetConnection][google.cloud.bigquery.connection.v1.ConnectionService.GetConnection].
name (str):
Required. Name of the requested connection, for example:
``projects/{project_id}/locations/{location_id}/connections/{connection_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_connection_v1.types.Connection:
Configuration parameters to establish
connection with an external data source,
except the credential attributes.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a connection.GetConnectionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, connection.GetConnectionRequest):
request = connection.GetConnectionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_connection]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_connections(self,
request: connection.ListConnectionsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListConnectionsPager:
r"""Returns a list of connections in the given project.
Args:
request (google.cloud.bigquery_connection_v1.types.ListConnectionsRequest):
The request object. The request for
[ConnectionService.ListConnections][google.cloud.bigquery.connection.v1.ConnectionService.ListConnections].
parent (str):
Required. Parent resource name. Must be in the form:
``projects/{project_id}/locations/{location_id}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_connection_v1.services.connection_service.pagers.ListConnectionsPager:
The response for
[ConnectionService.ListConnections][google.cloud.bigquery.connection.v1.ConnectionService.ListConnections].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a connection.ListConnectionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, connection.ListConnectionsRequest):
request = connection.ListConnectionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_connections]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListConnectionsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
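    # Hedged usage sketch: the returned pager resolves additional pages
    # transparently during iteration; the parent value is a placeholder:
    #
    #     for conn in client.list_connections(
    #             parent="projects/my-project/locations/us"):
    #         print(conn.name)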
def update_connection(self,
request: gcbc_connection.UpdateConnectionRequest = None,
*,
name: str = None,
connection: gcbc_connection.Connection = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcbc_connection.Connection:
r"""Updates the specified connection. For security
        reasons, the credential is also reset if connection properties
are in the update field mask.
Args:
request (google.cloud.bigquery_connection_v1.types.UpdateConnectionRequest):
The request object. The request for
[ConnectionService.UpdateConnection][google.cloud.bigquery.connection.v1.ConnectionService.UpdateConnection].
name (str):
Required. Name of the connection to update, for example:
``projects/{project_id}/locations/{location_id}/connections/{connection_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
connection (google.cloud.bigquery_connection_v1.types.Connection):
Required. Connection containing the
updated fields.
This corresponds to the ``connection`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Update mask for the
connection fields to be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_connection_v1.types.Connection:
Configuration parameters to establish
connection with an external data source,
except the credential attributes.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, connection, update_mask])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a gcbc_connection.UpdateConnectionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcbc_connection.UpdateConnectionRequest):
request = gcbc_connection.UpdateConnectionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if connection is not None:
request.connection = connection
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_connection]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_connection(self,
request: connection.DeleteConnectionRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes connection and associated credential.
Args:
request (google.cloud.bigquery_connection_v1.types.DeleteConnectionRequest):
The request object. The request for
[ConnectionService.DeleteConnectionRequest][].
name (str):
Required. Name of the deleted connection, for example:
``projects/{project_id}/locations/{location_id}/connections/{connection_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a connection.DeleteConnectionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, connection.DeleteConnectionRequest):
request = connection.DeleteConnectionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_connection]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_iam_policy(self,
request: iam_policy_pb2.GetIamPolicyRequest = None,
*,
resource: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Gets the access control policy for a resource.
Returns an empty policy if the resource exists and does
not have a policy set.
Args:
request (google.iam.v1.iam_policy_pb2.GetIamPolicyRequest):
The request object. Request message for `GetIamPolicy`
method.
resource (str):
REQUIRED: The resource for which the
policy is being requested. See the
operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform
resources.
A Policy is a collection of bindings. A binding binds
one or more members to a single role. Members can be
user accounts, service accounts, Google groups, and
domains (such as G Suite). A role is a named list of
permissions (defined by IAM or configured by users).
A binding can optionally specify a condition, which
is a logic expression that further constrains the
role binding based on attributes about the request
and/or target resource.
**JSON Example**
{
"bindings": [
{
"role":
"roles/resourcemanager.organizationAdmin",
"members": [ "user:[email protected]",
"group:[email protected]",
"domain:google.com",
"serviceAccount:[email protected]"
]
}, { "role":
"roles/resourcemanager.organizationViewer",
"members": ["user:[email protected]"],
"condition": { "title": "expirable access",
"description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } }
]
}
**YAML Example**
bindings: - members: - user:\ [email protected] -
group:\ [email protected] - domain:google.com -
serviceAccount:\ [email protected]
role: roles/resourcemanager.organizationAdmin -
members: - user:\ [email protected] role:
roles/resourcemanager.organizationViewer
condition: title: expirable access description:
Does not grant access after Sep 2020 expression:
request.time <
timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the
[IAM developer's
guide](\ https://cloud.google.com/iam/docs).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
if isinstance(request, dict):
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
# Null request, just make one.
request = iam_policy_pb2.GetIamPolicyRequest()
if resource is not None:
request.resource = resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_iam_policy]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("resource", request.resource),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def set_iam_policy(self,
request: iam_policy_pb2.SetIamPolicyRequest = None,
*,
resource: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Sets the access control policy on the specified resource.
Replaces any existing policy.
Can return ``NOT_FOUND``, ``INVALID_ARGUMENT``, and
``PERMISSION_DENIED`` errors.
Args:
request (google.iam.v1.iam_policy_pb2.SetIamPolicyRequest):
The request object. Request message for `SetIamPolicy`
method.
resource (str):
REQUIRED: The resource for which the
policy is being specified. See the
operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform
resources.
A Policy is a collection of bindings. A binding binds
one or more members to a single role. Members can be
user accounts, service accounts, Google groups, and
domains (such as G Suite). A role is a named list of
permissions (defined by IAM or configured by users).
A binding can optionally specify a condition, which
is a logic expression that further constrains the
role binding based on attributes about the request
and/or target resource.
**JSON Example**
{
"bindings": [
{
"role":
"roles/resourcemanager.organizationAdmin",
"members": [ "user:[email protected]",
"group:[email protected]",
"domain:google.com",
"serviceAccount:[email protected]"
]
}, { "role":
"roles/resourcemanager.organizationViewer",
"members": ["user:[email protected]"],
"condition": { "title": "expirable access",
"description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } }
]
}
**YAML Example**
bindings: - members: - user:\ [email protected] -
group:\ [email protected] - domain:google.com -
serviceAccount:\ [email protected]
role: roles/resourcemanager.organizationAdmin -
members: - user:\ [email protected] role:
roles/resourcemanager.organizationViewer
condition: title: expirable access description:
Does not grant access after Sep 2020 expression:
request.time <
timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the
[IAM developer's
guide](\ https://cloud.google.com/iam/docs).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
if isinstance(request, dict):
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
# Null request, just make one.
request = iam_policy_pb2.SetIamPolicyRequest()
if resource is not None:
request.resource = resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("resource", request.resource),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def test_iam_permissions(self,
request: iam_policy_pb2.TestIamPermissionsRequest = None,
*,
resource: str = None,
permissions: Sequence[str] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that a caller has on the specified resource.
If the resource does not exist, this will return an empty set of
permissions, not a ``NOT_FOUND`` error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for
authorization checking. This operation may "fail open" without
warning.
Args:
request (google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest):
The request object. Request message for
`TestIamPermissions` method.
resource (str):
REQUIRED: The resource for which the
policy detail is being requested. See
the operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
permissions (Sequence[str]):
The set of permissions to check for the ``resource``.
Permissions with wildcards (such as '*' or 'storage.*')
are not allowed. For more information see `IAM
Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
This corresponds to the ``permissions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource, permissions])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
if isinstance(request, dict):
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
# Null request, just make one.
request = iam_policy_pb2.TestIamPermissionsRequest()
if resource is not None:
request.resource = resource
if permissions:
request.permissions.extend(permissions)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("resource", request.resource),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-bigquery-connection",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"ConnectionServiceClient",
)
| apache-2.0 | 2,099,994,186,533,660,400 | 42.820671 | 149 | 0.589174 | false |
cd334/hangoutsbot | hangupsbot/plugins/forecast.py | 2 | 9901 | """
Use forecast.io to get current weather forecast for a given location.
Instructions:
* Get an API key from https://developer.forecast.io/
* Store API key in config.json:forecast_api_key
"""
import logging
import plugins
import requests
from decimal import Decimal
logger = logging.getLogger(__name__)
_internal = {}
def _initialize(bot):
api_key = bot.get_config_option('forecast_api_key')
if api_key:
_internal['forecast_api_key'] = api_key
plugins.register_user_command(['weather', 'forecast'])
plugins.register_admin_command(['setweatherlocation'])
else:
logger.error('WEATHER: config["forecast_api_key"] required')
def setweatherlocation(bot, event, *args):
"""Sets the Lat Long default coordinates for this hangout when polling for weather data
/bot setWeatherLocation <location>
"""
location = ''.join(args).strip()
if not location:
yield from bot.coro_send_message(event.conv_id, _('No location was specified, please specify a location.'))
return
location = _lookup_address(location)
if location is None:
yield from bot.coro_send_message(event.conv_id, _('Unable to find the specified location.'))
return
if not bot.memory.exists(["conv_data", event.conv.id_]):
bot.memory.set_by_path(['conv_data', event.conv.id_], {})
bot.memory.set_by_path(["conv_data", event.conv.id_, "default_weather_location"], {'lat': location['lat'], 'lng': location['lng']})
bot.memory.save()
    yield from bot.coro_send_message(event.conv_id, _("This hangout's default location has been set to {}.".format(location['address'])))
def weather(bot, event, *args):
"""Returns weather information from Forecast.io
    <b>/bot weather <location></b> Get the location's current weather.
    <b>/bot weather</b> Get the hangout's default location's current weather. If the default location is not set, talk to a hangout admin.
"""
weather = _get_weather(bot, event, args)
if weather:
yield from bot.coro_send_message(event.conv_id, _format_current_weather(weather))
else:
yield from bot.coro_send_message(event.conv_id, 'There was an error retrieving the weather, guess you need to look outside.')
def forecast(bot, event, *args):
"""Returns a brief textual forecast from Forecast.io
    <b>/bot forecast <location></b> Get the location's current forecast.
    <b>/bot forecast</b> Get the hangout's default location's forecast. If the default location is not set, talk to a hangout admin.
"""
weather = _get_weather(bot, event, args)
if weather:
yield from bot.coro_send_message(event.conv_id, _format_forecast_weather(weather))
else:
yield from bot.coro_send_message(event.conv_id, 'There was an error retrieving the weather, guess you need to look outside.')
def _format_current_weather(weather):
"""
Formats the current weather data for the user.
"""
weatherStrings = []
if 'temperature' in weather:
weatherStrings.append("It is currently: <b>{0}°{1}</b>".format(round(weather['temperature'],2),weather['units']['temperature']))
if 'summary' in weather:
weatherStrings.append("<i>{0}</i>".format(weather['summary']))
if 'feelsLike' in weather:
weatherStrings.append("Feels Like: {0}°{1}".format(round(weather['feelsLike'],2),weather['units']['temperature']))
if 'windspeed' in weather:
weatherStrings.append("Wind: {0} {1} from {2}".format(round(weather['windspeed'],2), weather['units']['windSpeed'], _get_wind_direction(weather['windbearing'])))
if 'humidity' in weather:
weatherStrings.append("Humidity: {0}%".format(weather['humidity']))
if 'pressure' in weather:
weatherStrings.append("Pressure: {0} {1}".format(round(weather['pressure'],2), weather['units']['pressure']))
return "<br/>".join(weatherStrings)
def _format_forecast_weather(weather):
"""
Formats the forecast data for the user.
"""
weatherStrings = []
if 'hourly' in weather:
weatherStrings.append("<b>Next 24 Hours</b><br/>{}". format(weather['hourly']))
if 'daily' in weather:
weatherStrings.append("<b>Next 7 Days</b><br/>{}". format(weather['daily']))
return "<br/>".join(weatherStrings)
def _lookup_address(location):
"""
    Retrieve the coordinates of the location from Google's geocode API.
Limit of 2,000 requests a day
"""
google_map_url = 'https://maps.googleapis.com/maps/api/geocode/json'
payload = {'address': location}
resp = requests.get(google_map_url, params=payload)
try:
resp.raise_for_status()
results = resp.json()['results'][0]
return {
'lat': results['geometry']['location']['lat'],
'lng': results['geometry']['location']['lng'],
'address': results['formatted_address']
}
except (IndexError, KeyError):
logger.error('unable to parse address return data: %d: %s', resp.status_code, resp.json())
return None
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout):
logger.error('unable to connect with maps.googleapis.com: %d - %s', resp.status_code, resp.text)
return None
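# Illustrative successful return value of _lookup_address("Copenhagen"); the numbers
# are assumptions, only the dict shape is guaranteed by the code above:
# {'lat': 55.67, 'lng': 12.56, 'address': 'Copenhagen, Denmark'}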
def _lookup_weather(coords):
"""
Retrieve the current forecast for the specified coordinates from forecast.io
Limit of 1,000 requests a day
"""
forecast_io_url = 'https://api.forecast.io/forecast/{0}/{1},{2}?units=auto'.format(_internal['forecast_api_key'],coords['lat'], coords['lng'])
r = requests.get(forecast_io_url)
try:
j = r.json()
current = {
'time' : j['currently']['time'],
'summary': j['currently']['summary'],
'temperature': Decimal(j['currently']['temperature']),
'feelsLike': Decimal(j['currently']['apparentTemperature']),
'units': _get_forcast_units(j),
'humidity': int(j['currently']['humidity']*100),
'windspeed' : Decimal(j['currently']['windSpeed']),
'windbearing' : j['currently']['windBearing'],
'pressure' : j['currently']['pressure']
}
if current['units']['pressure'] == 'kPa':
current['pressure'] = Decimal(current['pressure']/10)
if 'hourly' in j:
current['hourly'] = j['hourly']['summary']
if 'daily' in j:
current['daily'] = j['daily']['summary']
except ValueError as e:
logger.error("Forecast Error: {}".format(e))
current = dict()
    except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout):
        logger.error('unable to connect with api.forecast.io: %d - %s', r.status_code, r.text)
return None
return current
def _get_weather(bot,event,params):
"""
Checks memory for a default location set for the current hangout.
    If one is not found and parameters were specified, it attempts to look up a location.
    If it finds a location, it then attempts to load the weather data.
"""
parameters = list(params)
location = {}
if not parameters:
if bot.memory.exists(["conv_data", event.conv.id_]):
if(bot.memory.exists(["conv_data", event.conv.id_, "default_weather_location"])):
location = bot.memory.get_by_path(["conv_data", event.conv.id_, "default_weather_location"])
else:
address = ''.join(parameters).strip()
location = _lookup_address(address)
if location:
return _lookup_weather(location)
return {}
def _get_forcast_units(result):
"""
    Checks to see what unit system the results were passed back in and sets the display units accordingly
"""
units = {
'temperature': 'F',
'distance': 'Miles',
'percipIntensity': 'in./hr.',
'precipAccumulation': 'inches',
'windSpeed': 'mph',
'pressure': 'millibars'
}
if result['flags']:
unit = result['flags']['units']
if unit != 'us':
units['temperature'] = 'C'
units['distance'] = 'KM'
units['percipIntensity'] = 'milimeters per hour'
units['precipAccumulation'] = 'centimeters'
units['windSpeed'] = 'm/s'
units['pressure'] = 'kPa'
if unit == 'ca':
units['windSpeed'] = 'km/h'
if unit == 'uk2':
units['windSpeed'] = 'mph'
units['distance'] = 'Miles'
return units
def _get_wind_direction(degrees):
"""
    Determines the direction the wind is blowing from, based on the degrees passed from the API.
    0 degrees is true north.
"""
directionText = "N"
if degrees >= 5 and degrees < 40:
directionText = "NNE"
elif degrees >= 40 and degrees < 50:
directionText = "NE"
elif degrees >= 50 and degrees < 85:
directionText = "ENE"
elif degrees >= 85 and degrees < 95:
directionText = "E"
elif degrees >= 95 and degrees < 130:
directionText = "ESE"
elif degrees >= 130 and degrees < 140:
directionText = "SE"
elif degrees >= 140 and degrees < 175:
directionText = "SSE"
elif degrees >= 175 and degrees < 185:
directionText = "S"
elif degrees >= 185 and degrees < 220:
directionText = "SSW"
elif degrees >= 220 and degrees < 230:
directionText = "SW"
elif degrees >= 230 and degrees < 265:
directionText = "WSW"
elif degrees >= 265 and degrees < 275:
directionText = "W"
elif degrees >= 275 and degrees < 310:
directionText = "WNW"
elif degrees >= 310 and degrees < 320:
directionText = "NW"
elif degrees >= 320 and degrees < 355:
directionText = "NNW"
return directionText
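# Examples derived from the ranges above: _get_wind_direction(0) returns "N",
# _get_wind_direction(100) returns "ESE", and _get_wind_direction(225) returns "SW".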
| agpl-3.0 | -6,699,385,779,283,329,000 | 38.6 | 169 | 0.618951 | false |
gemrb/gemrb | gemrb/GUIScripts/pst/GUIREC.py | 1 | 37975 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# GUIREC.py - scripts to control stats/records windows from GUIREC winpack
# GUIREC:
# 0,1,2 - common windows (time, message, menu)
# 3 - main statistics window
# 4 - level up win
# 5 - info - kills, weapons ...
# 6 - dual class list ???
# 7 - class panel
# 8 - skills panel
# 9 - choose mage spells panel
# 10 - some small win, 1 button
# 11 - some small win, 2 buttons
# 12 - biography?
# 13 - specialist mage panel
# 14 - proficiencies
# 15 - some 2 panel window
# 16 - some 2 panel window
# 17 - some 2 panel window
# MainWindow:
# 0 - main textarea
# 1 - its scrollbar
# 2 - WMCHRP character portrait
# 5 - STALIGN alignment
# 6 - STFCTION faction
# 7,8,9 - STCSTM (info, reform party, level up)
# 0x1000000a - name
# 0x1000000b - ac
# 0x1000000c, 0x1000000d - hp now, hp max
# 0x1000000e - str
# 0x1000000f - int
# 0x10000010 - wis
# 0x10000011 - dex
# 0x10000012 - con
# 0x10000013 - chr
# 0x10000014 - race
# 0x10000015 - sex
# 0x10000016 - class
# 31-36 - stat buttons
# 37 - ac button
# 38 - hp button?
###################################################
import GemRB
import GUICommon
import CommonTables
import LevelUp
import LUCommon
import GUICommonWindows
import NewLife
from GUIDefines import *
from ie_stats import *
import GUIWORLD
import LUSkillsSelection
###################################################
LevelUpWindow = None
RecordsWindow = None
InformationWindow = None
BiographyWindow = None
###################################################
LevelDiff = 0
Level = 0
Classes = 0
NumClasses = 0
###################################################
def InitRecordsWindow (Window):
global RecordsWindow
global StatTable
RecordsWindow = Window
StatTable = GemRB.LoadTable("abcomm")
# Information
Button = Window.GetControl (7)
Button.SetText (4245)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenInformationWindow)
# Reform Party
Button = Window.GetControl (8)
Button.SetText (4244)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUIWORLD.OpenReformPartyWindow)
# Level Up
Button = Window.GetControl (9)
Button.SetText (4246)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenLevelUpWindow)
statevents = (OnRecordsHelpStrength, OnRecordsHelpIntelligence, OnRecordsHelpWisdom, OnRecordsHelpDexterity, OnRecordsHelpConstitution, OnRecordsHelpCharisma)
# stat buttons
for i in range (6):
Button = Window.GetControl (31 + i)
Button.SetFlags(IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetSprites("", 0, 0, 0, 0, 0)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.SetEvent (IE_GUI_MOUSE_ENTER_BUTTON, statevents[i])
Button.SetEvent (IE_GUI_MOUSE_LEAVE_BUTTON, OnRecordsButtonLeave)
# AC button
Button = Window.GetControl (37)
Button.SetFlags(IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetSprites("", 0, 0, 0, 0, 0)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.SetEvent (IE_GUI_MOUSE_ENTER_BUTTON, OnRecordsHelpArmorClass)
Button.SetEvent (IE_GUI_MOUSE_LEAVE_BUTTON, OnRecordsButtonLeave)
# HP button
Button = Window.GetControl (38)
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetSprites ("", 0, 0, 0, 0, 0)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.SetEvent (IE_GUI_MOUSE_ENTER_BUTTON, OnRecordsHelpHitPoints)
Button.SetEvent (IE_GUI_MOUSE_LEAVE_BUTTON, OnRecordsButtonLeave)
return
stats_overview = None
faction_help = ''
alignment_help = ''
avatar_header = {'PrimClass': "", 'SecoClass': "", 'PrimLevel': 0, 'SecoLevel': 0, 'XP': 0, 'PrimNextLevXP': 0, 'SecoNextLevXP': 0}
def UpdateRecordsWindow (Window):
global stats_overview, faction_help, alignment_help
pc = GemRB.GameGetSelectedPCSingle ()
# Setting up the character information
GetCharacterHeader (pc)
# Checking whether character has leveled up.
Button = Window.GetControl (9)
if LUCommon.CanLevelUp (pc):
Button.SetState (IE_GUI_BUTTON_ENABLED)
else:
Button.SetState (IE_GUI_BUTTON_DISABLED)
# name
Label = Window.GetControl (0x1000000a)
Label.SetText (GemRB.GetPlayerName (pc, 1))
# portrait
Image = Window.GetControl (2)
Image.SetState (IE_GUI_BUTTON_LOCKED)
Image.SetFlags(IE_GUI_BUTTON_NO_IMAGE | IE_GUI_BUTTON_PICTURE, OP_SET)
Image.SetPicture (GUICommonWindows.GetActorPortrait (pc, 'STATS'))
# armorclass
Label = Window.GetControl (0x1000000b)
Label.SetText (str (GemRB.GetPlayerStat (pc, IE_ARMORCLASS)))
Label.SetTooltip (4197)
# hp now
Label = Window.GetControl (0x1000000c)
Label.SetText (str (GemRB.GetPlayerStat (pc, IE_HITPOINTS)))
Label.SetTooltip (4198)
# hp max
Label = Window.GetControl (0x1000000d)
Label.SetText (str (GemRB.GetPlayerStat (pc, IE_MAXHITPOINTS)))
Label.SetTooltip (4199)
# stats
sstr = GemRB.GetPlayerStat (pc, IE_STR)
bstr = GemRB.GetPlayerStat (pc, IE_STR,1)
sstrx = GemRB.GetPlayerStat (pc, IE_STREXTRA)
bstrx = GemRB.GetPlayerStat (pc, IE_STREXTRA,1)
if (sstrx > 0) and (sstr==18):
sstr = "%d/%02d" %(sstr, sstrx % 100)
if (bstrx > 0) and (bstr==18):
bstr = "%d/%02d" %(bstr, bstrx % 100)
sint = GemRB.GetPlayerStat (pc, IE_INT)
bint = GemRB.GetPlayerStat (pc, IE_INT,1)
swis = GemRB.GetPlayerStat (pc, IE_WIS)
bwis = GemRB.GetPlayerStat (pc, IE_WIS,1)
sdex = GemRB.GetPlayerStat (pc, IE_DEX)
bdex = GemRB.GetPlayerStat (pc, IE_DEX,1)
scon = GemRB.GetPlayerStat (pc, IE_CON)
bcon = GemRB.GetPlayerStat (pc, IE_CON,1)
schr = GemRB.GetPlayerStat (pc, IE_CHR)
bchr = GemRB.GetPlayerStat (pc, IE_CHR,1)
stats = (sstr, sint, swis, sdex, scon, schr)
basestats = (bstr, bint, bwis, bdex, bcon, bchr)
for i in range (6):
Label = Window.GetControl (0x1000000e + i)
if stats[i]!=basestats[i]:
Label.SetTextColor ({'r' : 255, 'g' : 0, 'b' : 0})
else:
Label.SetTextColor ({'r' : 255, 'g' : 255, 'b' : 255})
Label.SetText (str (stats[i]))
# race
# this is -1 to lookup the value in the table
race = GemRB.GetPlayerStat (pc, IE_SPECIES) - 1
	# workaround for original saves that don't have the character's species stat set properly
if race == -1:
if GemRB.GetPlayerStat (pc, IE_SPECIFIC) == 3: # Vhailor
race = 50 # Changes from GHOST to RESTLESS_SPIRIT
elif GemRB.GetPlayerStat (pc, IE_SPECIFIC) == 9: # Morte
race = 44 # Changes from HUMAN to MORTE. Morte is Morte :)
else:
race = GemRB.GetPlayerStat (pc, IE_RACE) - 1
text = CommonTables.Races.GetValue (race, 0)
Label = Window.GetControl (0x10000014)
Label.SetText (text)
# sex
GenderTable = GemRB.LoadTable ("GENDERS")
text = GenderTable.GetValue (GemRB.GetPlayerStat (pc, IE_SEX) - 1, GTV_STR)
Label = Window.GetControl (0x10000015)
Label.SetText (text)
# class
text = CommonTables.Classes.GetValue (GUICommon.GetClassRowName (pc), "NAME_REF")
Label = Window.GetControl (0x10000016)
Label.SetText (text)
# alignment
align = GemRB.GetPlayerStat (pc, IE_ALIGNMENT)
ss = GemRB.LoadSymbol ("ALIGN")
sym = ss.GetValue (align)
AlignmentTable = GemRB.LoadTable ("ALIGNS")
alignment_help = AlignmentTable.GetValue (sym, 'DESC_REF', GTV_REF)
frame = (3 * int (align / 16) + align % 16) - 4
Button = Window.GetControl (5)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.SetSprites ('STALIGN', 0, frame, 0, 0, 0)
Button.SetEvent (IE_GUI_MOUSE_ENTER_BUTTON, OnRecordsHelpAlignment)
Button.SetEvent (IE_GUI_MOUSE_LEAVE_BUTTON, OnRecordsButtonLeave)
# faction
faction = GemRB.GetPlayerStat (pc, IE_FACTION)
FactionTable = GemRB.LoadTable ("FACTIONS")
faction_help = FactionTable.GetValue (faction, 0, GTV_REF)
frame = FactionTable.GetValue (faction, 1)
Button = Window.GetControl (6)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.SetSprites ('STFCTION', 0, frame, 0, 0, 0)
Button.SetEvent (IE_GUI_MOUSE_ENTER_BUTTON, OnRecordsHelpFaction)
Button.SetEvent (IE_GUI_MOUSE_LEAVE_BUTTON, OnRecordsButtonLeave)
# help, info textarea
stats_overview = GetStatOverview (pc)
Text = Window.GetControl (0)
Text.SetText (stats_overview)
return
ToggleRecordsWindow = GUICommonWindows.CreateTopWinLoader(3, "GUIREC", GUICommonWindows.ToggleWindow, InitRecordsWindow, UpdateRecordsWindow, WINDOW_TOP|WINDOW_HCENTER, True)
OpenRecordsWindow = GUICommonWindows.CreateTopWinLoader(3, "GUIREC", GUICommonWindows.OpenWindowOnce, InitRecordsWindow, UpdateRecordsWindow, WINDOW_TOP|WINDOW_HCENTER, True)
# puts default info to textarea (overview of PC's bonuses, saves, etc.
def OnRecordsButtonLeave ():
OnRecordsHelpStat (-1, 0, stats_overview)
return
def OnRecordsHelpFaction ():
Help = GemRB.GetString (20106) + "\n\n" + faction_help
OnRecordsHelpStat (-1, 0, Help)
return
def OnRecordsHelpArmorClass ():
OnRecordsHelpStat (-1, 0, 18493)
return
def OnRecordsHelpHitPoints ():
OnRecordsHelpStat (-1, 0, 18494)
return
def OnRecordsHelpAlignment ():
Help = GemRB.GetString (20105) + "\n\n" + alignment_help
OnRecordsHelpStat (-1, 0, Help)
return
#Bio:
# 38787 no
# 39423 morte
# 39424 annah
# 39425 dakkon
# 39426 ffg
# 39427 ignus
# 39428 nordom
# 39429 vhailor
def OnRecordsHelpStat (row, col, strref, bon1=0, bon2=0):
TextArea = RecordsWindow.GetControl (0)
TextArea.SetText (strref)
if row != -1:
TextArea.Append ("\n\n" + GemRB.StatComment (StatTable.GetValue(str(row),col), bon1, bon2) )
return
def OnRecordsHelpStrength ():
# These are used to get the stats
pc = GemRB.GameGetSelectedPCSingle ()
# Getting the character's strength
s = GemRB.GetPlayerStat (pc, IE_STR)
e = GemRB.GetPlayerStat (pc, IE_STREXTRA)
x = CommonTables.StrMod.GetValue(s, 0) + CommonTables.StrModEx.GetValue(e, 0)
y = CommonTables.StrMod.GetValue(s, 1) + CommonTables.StrModEx.GetValue(e, 1)
if x==0:
x=y
y=0
if e>60:
s=19
OnRecordsHelpStat (s, "STR", 18489, x, y)
return
def OnRecordsHelpDexterity ():
# Loading table of modifications
Table = GemRB.LoadTable("dexmod")
# These are used to get the stats
pc = GemRB.GameGetSelectedPCSingle ()
# Getting the character's dexterity
Dex = GemRB.GetPlayerStat (pc, IE_DEX)
# Getting the dexterity description
x = -Table.GetValue(Dex,2)
OnRecordsHelpStat (Dex, "DEX", 18487, x)
return
def OnRecordsHelpIntelligence ():
# These are used to get the stats
pc = GemRB.GameGetSelectedPCSingle ()
# Getting the character's intelligence
Int = GemRB.GetPlayerStat (pc, IE_INT)
OnRecordsHelpStat (Int, "INT", 18488)
return
def OnRecordsHelpWisdom ():
# These are used to get the stats
pc = GemRB.GameGetSelectedPCSingle ()
# Getting the character's wisdom
Wis = GemRB.GetPlayerStat (pc, IE_WIS)
OnRecordsHelpStat (Wis, "WIS", 18490)
return
def OnRecordsHelpConstitution ():
# Loading table of modifications
Table = GemRB.LoadTable("hpconbon")
# These are used to get the stats
pc = GemRB.GameGetSelectedPCSingle ()
# Getting the character's constitution
Con = GemRB.GetPlayerStat (pc, IE_CON)
# Getting the constitution description
x = Table.GetValue(Con-1,1)
OnRecordsHelpStat (Con, "CON", 18491, x)
return
def OnRecordsHelpCharisma ():
# These are used to get the stats
pc = GemRB.GameGetSelectedPCSingle ()
# Getting the character's charisma
Cha = GemRB.GetPlayerStat (pc, IE_CHR)
OnRecordsHelpStat (Cha, "CHR", 1903)
return
def GetCharacterHeader (pc):
global avatar_header
Class = GemRB.GetPlayerStat (pc, IE_CLASS) - 1
Multi = GUICommon.HasMultiClassBits (pc)
# Nameless is a special case (dual class)
if GUICommon.IsNamelessOne(pc):
avatar_header['PrimClass'] = CommonTables.Classes.GetRowName (Class)
avatar_header['SecoClass'] = "*"
avatar_header['SecoLevel'] = 0
if avatar_header['PrimClass'] == "FIGHTER":
avatar_header['PrimLevel'] = GemRB.GetPlayerStat (pc, IE_LEVEL)
avatar_header['XP'] = GemRB.GetPlayerStat (pc, IE_XP)
elif avatar_header['PrimClass'] == "MAGE":
avatar_header['PrimLevel'] = GemRB.GetPlayerStat (pc, IE_LEVEL2)
avatar_header['XP'] = GemRB.GetPlayerStat (pc, IE_XP_MAGE)
else:
avatar_header['PrimLevel'] = GemRB.GetPlayerStat (pc, IE_LEVEL3)
avatar_header['XP'] = GemRB.GetPlayerStat (pc, IE_XP_THIEF)
avatar_header['PrimNextLevXP'] = GetNextLevelExp (avatar_header['PrimLevel'], avatar_header['PrimClass'])
avatar_header['SecoNextLevXP'] = 0
else:
# PC is not NAMELESS_ONE
avatar_header['PrimLevel'] = GemRB.GetPlayerStat (pc, IE_LEVEL)
avatar_header['XP'] = GemRB.GetPlayerStat (pc, IE_XP)
if Multi:
avatar_header['XP'] = avatar_header['XP'] / 2
avatar_header['SecoLevel'] = GemRB.GetPlayerStat (pc, IE_LEVEL2)
avatar_header['PrimClass'] = "FIGHTER"
if Multi == 3:
#fighter/mage
Class = 0
else:
#fighter/thief
Class = 3
avatar_header['SecoClass'] = CommonTables.Classes.GetRowName (Class)
avatar_header['PrimNextLevXP'] = GetNextLevelExp (avatar_header['PrimLevel'], avatar_header['PrimClass'])
avatar_header['SecoNextLevXP'] = GetNextLevelExp (avatar_header['SecoLevel'], avatar_header['SecoClass'])
# Converting to the displayable format
avatar_header['SecoClass'] = CommonTables.Classes.GetValue (avatar_header['SecoClass'], "NAME_REF", GTV_REF)
else:
avatar_header['SecoLevel'] = 0
avatar_header['PrimClass'] = CommonTables.Classes.GetRowName (Class)
avatar_header['SecoClass'] = "*"
avatar_header['PrimNextLevXP'] = GetNextLevelExp (avatar_header['PrimLevel'], avatar_header['PrimClass'])
avatar_header['SecoNextLevXP'] = 0
# Converting to the displayable format
avatar_header['PrimClass'] = CommonTables.Classes.GetValue (avatar_header['PrimClass'], "NAME_REF", GTV_REF)
def GetNextLevelExp (Level, Class):
if (Level < 20):
NextLevel = CommonTables.NextLevel.GetValue (Class, str (Level + 1))
else:
After21ExpTable = GemRB.LoadTable ("LVL21PLS")
ExpGap = After21ExpTable.GetValue (Class, 'XPGAP')
LevDiff = Level - 19
Lev20Exp = CommonTables.NextLevel.GetValue (Class, "20")
NextLevel = Lev20Exp + (LevDiff * ExpGap)
return NextLevel
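# Worked example of the over-20 branch above (the table values are assumptions):
# with Lev20Exp = 3000000 and ExpGap = 300000, GetNextLevelExp (23, "FIGHTER")
# returns 3000000 + (23 - 19) * 300000 = 4200000.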
def GetStatOverview (pc):
won = "[color=FFFFFF]"
woff = "[/color]"
str_None = GemRB.GetString (41275)
GS = lambda s, pc=pc: GemRB.GetPlayerStat (pc, s)
stats = []
# Displaying Class, Level, Experience and Next Level Experience
if (avatar_header['SecoLevel'] == 0):
stats.append ((avatar_header['PrimClass'], "", 'd'))
stats.append ((48156, avatar_header['PrimLevel'], ''))
stats.append ((19673, avatar_header['XP'], ''))
stats.append ((19674, avatar_header['PrimNextLevXP'], ''))
else:
stats.append ((19414, "", ''))
stats.append (None)
stats.append ((avatar_header['PrimClass'], "", 'd'))
stats.append ((48156, avatar_header['PrimLevel'], ''))
stats.append ((19673, avatar_header['XP'], ''))
stats.append ((19674, avatar_header['PrimNextLevXP'], ''))
stats.append (None)
stats.append ((avatar_header['SecoClass'], "", 'd'))
stats.append ((48156, avatar_header['SecoLevel'], ''))
stats.append ((19673, avatar_header['XP'], ''))
stats.append ((19674, avatar_header['SecoNextLevXP'], ''))
# 59856 Current State
stats.append (None)
StatesTable = GemRB.LoadTable ("states")
StateID = GS (IE_STATE_ID)
State = StatesTable.GetValue (str (StateID), "NAME_REF", GTV_REF)
stats.append ((won + GemRB.GetString (59856) + woff, "", 'd'))
stats.append ((State, "", 'd'))
stats.append (None)
# 67049 AC Bonuses
stats.append (67049)
# 67204 AC vs. Slashing
stats.append ((67204, GS (IE_ACSLASHINGMOD), ''))
# 67205 AC vs. Piercing
stats.append ((67205, GS (IE_ACPIERCINGMOD), ''))
# 67206 AC vs. Crushing
stats.append ((67206, GS (IE_ACCRUSHINGMOD), ''))
# 67207 AC vs. Missile
stats.append ((67207, GS (IE_ACMISSILEMOD), ''))
stats.append (None)
# 67208 Resistances
stats.append (67208)
# 67209 Normal Fire
stats.append ((67209, GS (IE_RESISTFIRE), '%'))
# 67210 Magic Fire
stats.append ((67210, GS (IE_RESISTMAGICFIRE), '%'))
# 67211 Normal Cold
stats.append ((67211, GS (IE_RESISTCOLD), '%'))
# 67212 Magic Cold
stats.append ((67212, GS (IE_RESISTMAGICCOLD), '%'))
# 67213 Electricity
stats.append ((67213, GS (IE_RESISTELECTRICITY), '%'))
# 67214 Acid
stats.append ((67214, GS (IE_RESISTACID), '%'))
# 67215 Magic
stats.append ((67215, GS (IE_RESISTMAGIC), '%'))
# 67216 Slashing Attacks
stats.append ((67216, GS (IE_RESISTSLASHING), '%'))
# 67217 Piercing Attacks
stats.append ((67217, GS (IE_RESISTPIERCING), '%'))
# 67218 Crushing Attacks
stats.append ((67218, GS (IE_RESISTCRUSHING), '%'))
# 67219 Missile Attacks
stats.append ((67219, GS (IE_RESISTMISSILE), '%'))
stats.append (None)
# 4220 Proficiencies
stats.append (4220)
# 4208 THAC0
stats.append ((4208, GS (IE_TOHIT), ''))
# 4209 Number of Attacks
tmp = GemRB.GetCombatDetails(pc, 0)["APR"]
if (tmp&1):
tmp2 = str(tmp/2) + chr(189)
else:
tmp2 = str(tmp/2)
stats.append ((4209, tmp2, ''))
# 4210 Lore
stats.append ((4210, GS (IE_LORE), ''))
# 4211 Open Locks
stats.append ((4211, GS (IE_LOCKPICKING), '%'))
# 4212 Stealth
stats.append ((4212, GS (IE_STEALTH), '%'))
# 4213 Find/Remove Traps
stats.append ((4213, GS (IE_TRAPS), '%'))
# 4214 Pick Pockets
stats.append ((4214, GS (IE_PICKPOCKET), '%'))
# 4215 Tracking
stats.append ((4215, GS (IE_TRACKING), ''))
# 4216 Reputation
stats.append ((4216, GS (IE_REPUTATION), ''))
# 4217 Turn Undead Level
stats.append ((4217, GS (IE_TURNUNDEADLEVEL), ''))
# 4218 Lay on Hands Amount
stats.append ((4218, GS (IE_LAYONHANDSAMOUNT), ''))
# 4219 Backstab Damage
stats.append ((4219, GS (IE_BACKSTABDAMAGEMULTIPLIER), 'x'))
stats.append (None)
# 4221 Saving Throws
stats.append (4221)
# 4222 Paralyze/Poison/Death
stats.append ((4222, GS (IE_SAVEVSDEATH), ''))
# 4223 Rod/Staff/Wand
stats.append ((4223, GS (IE_SAVEVSWANDS), ''))
# 4224 Petrify/Polymorph
stats.append ((4224, GS (IE_SAVEVSPOLY), ''))
# 4225 Breath Weapon
stats.append ((4225, GS (IE_SAVEVSBREATH), ''))
# 4226 Spells
stats.append ((4226, GS (IE_SAVEVSSPELL), ''))
stats.append (None)
# 4227 Weapon Proficiencies
stats.append (4227)
# 55011 Unused Slots
stats.append ((55011, GS (IE_FREESLOTS), ''))
# 33642 Fist
stats.append ((33642, GS (IE_PROFICIENCYBASTARDSWORD), '+'))
# 33649 Edged Weapon
stats.append ((33649, GS (IE_PROFICIENCYLONGSWORD), '+'))
# 33651 Hammer
stats.append ((33651, GS (IE_PROFICIENCYSHORTSWORD), '+'))
# 44990 Axe
stats.append ((44990, GS (IE_PROFICIENCYAXE), '+'))
# 33653 Club
stats.append ((33653, GS (IE_PROFICIENCYTWOHANDEDSWORD), '+'))
# 33655 Bow
stats.append ((33655, GS (IE_PROFICIENCYKATANA), '+'))
stats.append (None)
# 4228 Ability Bonuses
stats.append (4228)
# 4229 To Hit
# 4230 Damage
# 4231 Open Doors
# 4232 Weight Allowance
# 4233 Armor Class Bonus
# 4234 Missile Adjustment
stats.append ((4234, GS (IE_ACMISSILEMOD), ''))
# 4236 CON HP Bonus/Level
# 4240 Reaction
stats.append (None)
# 4238 Magical Defense Adjustment
stats.append (4238)
# 4239 Bonus Priest Spells
stats.append ((4239, GS (IE_CASTINGLEVELBONUSCLERIC), ''))
stats.append (None)
# 4237 Chance to learn spell
#SpellLearnChance = won + GemRB.GetString (4237) + woff
# ??? 4235 Reaction Adjustment
res = []
lines = 0
for s in stats:
try:
strref, val, stattype = s
if val == 0 and stattype != '0':
continue
if stattype == '+':
res.append (GemRB.GetString (strref) + ' '+ '+' * val)
elif stattype == 'd': #strref is an already resolved string
res.append (strref)
elif stattype == 'x':
res.append (GemRB.GetString (strref) + ': x' + str (val))
else:
res.append (GemRB.GetString (strref) + ': ' + str (val) + stattype)
lines = 1
except:
if s != None:
res.append (won + GemRB.GetString (s) + woff)
lines = 0
else:
if not lines:
res.append (str_None)
res.append ("")
lines = 0
return "\n".join (res)
def OpenInformationWindow ():
global InformationWindow
if InformationWindow != None:
if BiographyWindow: OpenBiographyWindow ()
if InformationWindow:
InformationWindow.Unload ()
InformationWindow = None
return
InformationWindow = Window = GemRB.LoadWindow (5)
# Biography
Button = Window.GetControl (1)
Button.SetText (4247)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenBiographyWindow)
# Done
Button = Window.GetControl (0)
Button.SetText (1403)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenInformationWindow)
Button.MakeEscape()
TotalPartyExp = 0
TotalPartyKills = 0
for i in range (1, GemRB.GetPartySize() + 1):
stat = GemRB.GetPCStats(i)
TotalPartyExp = TotalPartyExp + stat['KillsTotalXP']
TotalPartyKills = TotalPartyKills + stat['KillsTotalCount']
# These are used to get the stats
pc = GemRB.GameGetSelectedPCSingle ()
stat = GemRB.GetPCStats (pc)
Label = Window.GetControl (0x10000001)
Label.SetText (GemRB.GetPlayerName (pc, 1))
# class
ClassTitle = GUICommon.GetActorClassTitle (pc)
Label = Window.GetControl (0x1000000A)
Label.SetText (ClassTitle)
Label = Window.GetControl (0x10000002)
if stat['BestKilledName'] == -1:
Label.SetText (GemRB.GetString (41275))
else:
Label.SetText (GemRB.GetString (stat['BestKilledName']))
Label = Window.GetControl (0x10000003)
GUICommon.SetCurrentDateTokens (stat, True)
Label.SetText (41277)
Label = Window.GetControl (0x10000004)
Label.SetText (stat['FavouriteSpell'])
Label = Window.GetControl (0x10000005)
Label.SetText (stat['FavouriteWeapon'])
Label = Window.GetControl (0x10000006)
if TotalPartyExp != 0:
PartyExp = int ((stat['KillsTotalXP'] * 100) / TotalPartyExp)
Label.SetText (str (PartyExp) + '%')
else:
Label.SetText ("0%")
Label = Window.GetControl (0x10000007)
if TotalPartyKills != 0:
PartyKills = int ((stat['KillsTotalCount'] * 100) / TotalPartyKills)
Label.SetText (str (PartyKills) + '%')
else:
Label.SetText ("0%")
Label = Window.GetControl (0x10000008)
Label.SetText (str (stat['KillsTotalXP']))
Label = Window.GetControl (0x10000009)
Label.SetText (str (stat['KillsTotalCount']))
White = {'r' : 255, 'g' : 255, 'b' : 255}
Label = Window.GetControl (0x1000000B)
Label.SetTextColor (White)
Label = Window.GetControl (0x1000000C)
Label.SetTextColor (White)
Label = Window.GetControl (0x1000000D)
Label.SetTextColor (White)
Label = Window.GetControl (0x1000000E)
Label.SetTextColor (White)
Label = Window.GetControl (0x1000000F)
Label.SetTextColor (White)
Label = Window.GetControl (0x10000010)
Label.SetTextColor (White)
Label = Window.GetControl (0x10000011)
Label.SetTextColor (White)
Label = Window.GetControl (0x10000012)
Label.SetTextColor (White)
Window.ShowModal (MODAL_SHADOW_GRAY)
def OpenBiographyWindow ():
global BiographyWindow
if BiographyWindow != None:
if BiographyWindow:
BiographyWindow.Unload ()
BiographyWindow = None
InformationWindow.ShowModal (MODAL_SHADOW_GRAY)
return
BiographyWindow = Window = GemRB.LoadWindow (12)
# These are used to get the bio
pc = GemRB.GameGetSelectedPCSingle ()
BioTable = GemRB.LoadTable ("bios")
Specific = GemRB.GetPlayerStat (pc, IE_SPECIFIC)
BioText = int (BioTable.GetValue (BioTable.GetRowName (Specific), 'BIO'))
TextArea = Window.GetControl (0)
TextArea.SetText (BioText)
# Done
Button = Window.GetControl (2)
Button.SetText (1403)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenBiographyWindow)
Button.MakeEscape()
Window.ShowModal (MODAL_SHADOW_GRAY)
def AcceptLevelUp():
#do level up
pc = GemRB.GameGetSelectedPCSingle ()
GemRB.SetPlayerStat (pc, IE_SAVEVSDEATH, SavThrows[0])
GemRB.SetPlayerStat (pc, IE_SAVEVSWANDS, SavThrows[1])
GemRB.SetPlayerStat (pc, IE_SAVEVSPOLY, SavThrows[2])
GemRB.SetPlayerStat (pc, IE_SAVEVSBREATH, SavThrows[3])
GemRB.SetPlayerStat (pc, IE_SAVEVSSPELL, SavThrows[4])
oldhp = GemRB.GetPlayerStat (pc, IE_MAXHITPOINTS, 1)
GemRB.SetPlayerStat (pc, IE_MAXHITPOINTS, HPGained+oldhp)
oldhp = GemRB.GetPlayerStat (pc, IE_HITPOINTS, 1)
GemRB.SetPlayerStat (pc, IE_HITPOINTS, HPGained+oldhp)
# Weapon Proficiencies
if WeapProfType != -1:
# Companion NPC's get points added directly to their chosen weapon
GemRB.SetPlayerStat (pc, IE_PROFICIENCYBASTARDSWORD+WeapProfType, CurrWeapProf + WeapProfGained)
else:
# TNO has points added to the "Unused Slots" dummy proficiency
freeSlots = GemRB.GetPlayerStat(pc, IE_FREESLOTS)
GemRB.SetPlayerStat (pc, IE_FREESLOTS, freeSlots + WeapProfGained)
SwitcherClass = GUICommon.NamelessOneClass(pc)
if SwitcherClass:
# Handle saving of TNO class level in the correct CRE stat
Levels = { "FIGHTER" : GemRB.GetPlayerStat (pc, IE_LEVEL) , "MAGE": GemRB.GetPlayerStat (pc, IE_LEVEL2), "THIEF": GemRB.GetPlayerStat (pc, IE_LEVEL3) }
LevelStats = { "FIGHTER" : IE_LEVEL , "MAGE": IE_LEVEL2, "THIEF": IE_LEVEL3 }
GemRB.SetPlayerStat (pc, LevelStats[SwitcherClass], Levels[SwitcherClass]+NumOfPrimLevUp)
else:
GemRB.SetPlayerStat (pc, IE_LEVEL, GemRB.GetPlayerStat (pc, IE_LEVEL)+NumOfPrimLevUp)
if avatar_header['SecoLevel'] != 0:
GemRB.SetPlayerStat (pc, IE_LEVEL2, GemRB.GetPlayerStat (pc, IE_LEVEL2)+NumOfSecoLevUp)
LUSkillsSelection.SkillsSave (pc)
# Spells
LevelUp.pc = pc
LevelUp.Classes = Classes
LevelUp.NumClasses = NumClasses
# (we need to override the globals this function uses there since they wouldn't have been set)
LevelUp.SaveNewSpells()
LevelUpWindow.Close()
NewLife.OpenLUStatsWindow()
def RedrawSkills():
DoneButton = LevelUpWindow.GetControl(0)
if GemRB.GetVar ("SkillPointsLeft") == 0:
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
else:
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
return
def OpenLevelUpWindow ():
global LevelUpWindow
global SavThrows
global HPGained
global WeapProfType, CurrWeapProf, WeapProfGained
global NumOfPrimLevUp, NumOfSecoLevUp
global LevelDiff, Level, Classes, NumClasses
LevelUpWindow = Window = GemRB.LoadWindow (4, "GUIREC") # since we get called from NewLife
# Accept
Button = Window.GetControl (0)
Button.SetText (4192)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, AcceptLevelUp)
pc = GemRB.GameGetSelectedPCSingle ()
# These are used to identify Nameless One
BioTable = GemRB.LoadTable ("bios")
Specific = GemRB.GetPlayerStat (pc, IE_SPECIFIC)
AvatarName = BioTable.GetRowName (Specific)
# These will be used for saving throws
SavThrUpdated = False
SavThrows = [0,0,0,0,0]
SavThrows[0] = GemRB.GetPlayerStat (pc, IE_SAVEVSDEATH)
SavThrows[1] = GemRB.GetPlayerStat (pc, IE_SAVEVSWANDS)
SavThrows[2] = GemRB.GetPlayerStat (pc, IE_SAVEVSPOLY)
SavThrows[3] = GemRB.GetPlayerStat (pc, IE_SAVEVSBREATH)
SavThrows[4] = GemRB.GetPlayerStat (pc, IE_SAVEVSSPELL)
HPGained = 0
ConHPBon = 0
Thac0Updated = False
Thac0 = 0
WeapProfGained = 0
WeapProfType = -1
CurrWeapProf = 0
	# Count the number of existing weapon proficiencies
if GUICommon.IsNamelessOne(pc):
# TNO: Count the total amount of unassigned proficiencies
CurrWeapProf = GemRB.GetPlayerStat(pc, IE_FREESLOTS)
else:
		# Scan the weapprof table for the character's favoured weapon proficiency (WeapProfType)
		# This does not apply to the Nameless One, since he uses the unused-slots system
for i in range (6):
WeapProfName = CommonTables.WeapProfs.GetRowName (i)
value = CommonTables.WeapProfs.GetValue (WeapProfName,AvatarName)
if value == 1:
WeapProfType = i
break
for i in range (6):
CurrWeapProf += GemRB.GetPlayerStat (pc, IE_PROFICIENCYBASTARDSWORD + i)
# What is the avatar's class (Which we can use to lookup XP)
Class = GUICommon.GetClassRowName (pc)
# name
Label = Window.GetControl (0x10000000)
Label.SetText (GemRB.GetPlayerName (pc, 1))
# class
Label = Window.GetControl (0x10000001)
Label.SetText (CommonTables.Classes.GetValue (Class, "NAME_REF"))
# Armor Class
Label = Window.GetControl (0x10000023)
Label.SetText (str (GemRB.GetPlayerStat (pc, IE_ARMORCLASS)))
# our multiclass variables
IsMulti = GUICommon.IsMultiClassed (pc, 1)
Classes = [IsMulti[1], IsMulti[2], IsMulti[3]]
NumClasses = IsMulti[0] # 2 or 3 if IsMulti; 0 otherwise
IsMulti = NumClasses > 1
if not IsMulti:
NumClasses = 1
Classes = [GemRB.GetPlayerStat (pc, IE_CLASS)]
if GUICommon.IsNamelessOne(pc):
# Override the multiclass info for TNO
IsMulti = 1
NumClasses = 3
# Fighter, Mage, Thief ID
Classes = [2, 1, 4]
Level = LUCommon.GetNextLevels(pc, Classes)
LevelDiff = LUCommon.GetLevelDiff(pc, Level)
# calculate the new spells (results are stored in global variables in LevelUp)
LevelUp.GetNewSpells(pc, Classes, Level, LevelDiff)
# Thief Skills
Level1 = []
for i in range (len (Level)):
Level1.append (Level[i]-LevelDiff[i])
LUSkillsSelection.SetupSkillsWindow (pc, LUSkillsSelection.LUSKILLS_TYPE_LEVELUP, LevelUpWindow, RedrawSkills, Level1, Level, 0, False)
RedrawSkills()
# Is avatar multi-class?
if avatar_header['SecoLevel'] == 0:
# avatar is single class
# What will be avatar's next level?
NextLevel = avatar_header['PrimLevel'] + 1
while avatar_header['XP'] >= GetNextLevelExp (NextLevel, Class):
NextLevel = NextLevel + 1
NumOfPrimLevUp = NextLevel - avatar_header['PrimLevel'] # How many levels did we go up?
		# How many weapon proficiencies we get
for i in range (NumOfPrimLevUp):
WeapProfGained += GainedWeapProfs (pc, CurrWeapProf + WeapProfGained, avatar_header['PrimLevel'] + i, AvatarName)
# Hit Points Gained and Hit Points from Constitution Bonus
for i in range (NumOfPrimLevUp):
HPGained = HPGained + GetSingleClassHP (Class, avatar_header['PrimLevel'])
if Class == "FIGHTER":
CONType = 0
else:
CONType = 1
ConHPBon = GetConHPBonus (pc, NumOfPrimLevUp, 0, CONType)
# Thac0
Thac0 = GetThac0 (Class, NextLevel)
# Is the new thac0 better than old? (The smaller, the better)
if Thac0 < GemRB.GetPlayerStat (pc, IE_TOHIT, 1):
Thac0Updated = True
# Saving Throws
if GUICommon.IsNamelessOne(pc):
# Nameless One always uses the best possible save from each class
FigSavThrTable = GemRB.LoadTable ("SAVEWAR")
MagSavThrTable = GemRB.LoadTable ("SAVEWIZ")
ThiSavThrTable = GemRB.LoadTable ("SAVEROG")
FighterLevel = GemRB.GetPlayerStat (pc, IE_LEVEL) - 1
MageLevel = GemRB.GetPlayerStat (pc, IE_LEVEL2) - 1
ThiefLevel = GemRB.GetPlayerStat (pc, IE_LEVEL3) - 1
# We are leveling up one of those levels. Therefore, one of them has to be updated.
if Class == "FIGHTER":
FighterLevel = NextLevel - 1
elif Class == "MAGE":
MageLevel = NextLevel - 1
else:
ThiefLevel = NextLevel - 1
# Now we need to update the saving throws with the best values from those tables.
# The smaller the number, the better saving throw it is.
# We also need to check if any of the levels are larger than 21, since
# after that point the table runs out, and the throws remain the
# same
if FighterLevel < 21:
for i in range (5):
Throw = FigSavThrTable.GetValue (i, FighterLevel)
if Throw < SavThrows[i]:
SavThrows[i] = Throw
SavThrUpdated = True
if MageLevel < 21:
for i in range (5):
Throw = MagSavThrTable.GetValue (i, MageLevel)
if Throw < SavThrows[i]:
SavThrows[i] = Throw
SavThrUpdated = True
if ThiefLevel < 21:
for i in range (5):
Throw = ThiSavThrTable.GetValue (i, ThiefLevel)
if Throw < SavThrows[i]:
SavThrows[i] = Throw
SavThrUpdated = True
else:
SavThrTable = GemRB.LoadTable (CommonTables.Classes.GetValue (Class, "SAVE"))
# Updating the current saving throws. They are changed only if the
# new ones are better than current. The smaller the number, the better.
			# We need to subtract one from NextLevel, so that we get the right values.
# We also need to check if NextLevel is larger than 21, since after that point
# the table runs out, and the throws remain the same
if NextLevel < 22:
for i in range (5):
Throw = SavThrTable.GetValue (i, NextLevel-1)
if Throw < SavThrows[i]:
SavThrows[i] = Throw
SavThrUpdated = True
else:
# avatar is multi class
# we have only fighter/X multiclasses, so this
# part is a bit hardcoded
PrimNextLevel = 0
SecoNextLevel = 0
NumOfPrimLevUp = 0
NumOfSecoLevUp = 0
# What will be avatar's next levels?
PrimNextLevel = avatar_header['PrimLevel']
while avatar_header['XP'] >= GetNextLevelExp (PrimNextLevel, "FIGHTER"):
PrimNextLevel = PrimNextLevel + 1
# How many primary levels did we go up?
NumOfPrimLevUp = PrimNextLevel - avatar_header['PrimLevel']
for i in range (NumOfPrimLevUp):
WeapProfGained += GainedWeapProfs (pc, CurrWeapProf + WeapProfGained, avatar_header['PrimLevel'] + i, AvatarName)
# Saving Throws
FigSavThrTable = GemRB.LoadTable ("SAVEWAR")
if PrimNextLevel < 22:
for i in range (5):
Throw = FigSavThrTable.GetValue (i, PrimNextLevel - 1)
if Throw < SavThrows[i]:
SavThrows[i] = Throw
SavThrUpdated = True
# Which multi class is it?
if GemRB.GetPlayerStat (pc, IE_CLASS) == 7:
# avatar is Fighter/Mage (Dak'kon)
Class = "MAGE"
SavThrTable = GemRB.LoadTable ("SAVEWIZ")
else:
# avatar is Fighter/Thief (Annah)
Class = "THIEF"
SavThrTable = GemRB.LoadTable ("SAVEROG")
SecoNextLevel = avatar_header['SecoLevel']
while avatar_header['XP'] >= GetNextLevelExp (SecoNextLevel, Class):
SecoNextLevel = SecoNextLevel + 1
# How many secondary levels did we go up?
NumOfSecoLevUp = SecoNextLevel - avatar_header['SecoLevel']
if SecoNextLevel < 22:
for i in range (5):
Throw = SavThrTable.GetValue (i, SecoNextLevel - 1)
if Throw < SavThrows[i]:
SavThrows[i] = Throw
SavThrUpdated = True
# Hit Points Gained and Hit Points from Constitution Bonus (multiclass)
for i in range (NumOfPrimLevUp):
HPGained = HPGained + GetSingleClassHP ("FIGHTER", avatar_header['PrimLevel'])/2
for i in range (NumOfSecoLevUp):
HPGained = HPGained + GetSingleClassHP (Class, avatar_header['SecoLevel'])/2
ConHPBon = GetConHPBonus (pc, NumOfPrimLevUp, NumOfSecoLevUp, 2)
# Thac0
# Multi class use the primary class level to determine Thac0
Thac0 = GetThac0 (Class, PrimNextLevel)
# Is the new thac0 better than old? (The smaller the better)
if Thac0 < GemRB.GetPlayerStat (pc, IE_TOHIT, 1):
Thac0Updated = True
# Displaying the saving throws
# Death
Label = Window.GetControl (0x10000019)
Label.SetText (str (SavThrows[0]))
# Wand
Label = Window.GetControl (0x1000001B)
Label.SetText (str (SavThrows[1]))
# Polymorph
Label = Window.GetControl (0x1000001D)
Label.SetText (str (SavThrows[2]))
# Breath
Label = Window.GetControl (0x1000001F)
Label.SetText (str (SavThrows[3]))
# Spell
Label = Window.GetControl (0x10000021)
Label.SetText (str (SavThrows[4]))
FinalCurHP = GemRB.GetPlayerStat (pc, IE_HITPOINTS) + HPGained
FinalMaxHP = GemRB.GetPlayerStat (pc, IE_MAXHITPOINTS) + HPGained
# Current HP
Label = Window.GetControl (0x10000025)
Label.SetText (str (FinalCurHP))
# Max HP
Label = Window.GetControl (0x10000027)
Label.SetText (str (FinalMaxHP))
# Displaying level up info
overview = ""
if CurrWeapProf!=-1 and WeapProfGained>0:
overview = overview + GemRB.GetString (38715) + '\n' + '+' + str (WeapProfGained) + '\n'
overview = overview + str (HPGained) + " " + GemRB.GetString (38713) + '\n'
overview = overview + str (ConHPBon) + " " + GemRB.GetString (38727) + '\n'
if SavThrUpdated:
overview = overview + GemRB.GetString (38719) + '\n'
if Thac0Updated:
GemRB.SetPlayerStat (pc, IE_TOHIT, Thac0)
overview = overview + GemRB.GetString (38718) + '\n'
Text = Window.GetControl (3)
Text.SetText (overview)
Window.ShowModal (MODAL_SHADOW_GRAY)
def GetSingleClassHP (Class, Level):
HPTable = GemRB.LoadTable (CommonTables.Classes.GetValue (Class, "HP"))
# We need to check if Level is larger than 20, since after that point
	# the table runs out, and the formula remains the same.
if Level > 20:
Level = 20
# We need the Level as a string, so that we can use the column names
Level = str (Level)
Sides = HPTable.GetValue (Level, "SIDES")
Rolls = HPTable.GetValue (Level, "ROLLS")
Modif = HPTable.GetValue (Level, "MODIFIER")
return GemRB.Roll (Rolls, Sides, Modif)
def GetConHPBonus (pc, numPrimLevels, numSecoLevels, levelUpType):
ConHPBonTable = GemRB.LoadTable ("HPCONBON")
con = str (GemRB.GetPlayerStat (pc, IE_CON))
if levelUpType == 0:
# Pure fighter
return ConHPBonTable.GetValue (con, "WARRIOR") * numPrimLevels
if levelUpType == 1:
# Mage, Priest or Thief
return ConHPBonTable.GetValue (con, "OTHER") * numPrimLevels
return ConHPBonTable.GetValue (con, "WARRIOR") * numPrimLevels / 2 + ConHPBonTable.GetValue (con, "OTHER") * numSecoLevels / 2
def GetThac0 (Class, Level):
Thac0Table = GemRB.LoadTable ("THAC0")
# We need to check if Level is larger than 60, since after that point
# the table runs out, and the value remains the same.
if Level > 60:
Level = 60
return Thac0Table.GetValue (Class, str (Level))
# each gained level is checked for how many new prof points gained
def GainedWeapProfs (pc, currProf, currLevel, AvatarName):
# Actually looking at the next level
nextLevel = currLevel + 1
# The table stops at level 20
if nextLevel < 21:
maxProf = CommonTables.CharProfs.GetValue(str(nextLevel), AvatarName)
return maxProf - currProf
# Nameless continues gaining points forever at a rate of 1 every 3 levels
elif GUICommon.IsNamelessOne(pc) and (currProf-3) <= (nextLevel / 3):
return 1
return 0
###################################################
# End of file GUIREC.py
| gpl-2.0 | -7,488,686,639,834,815,000 | 29.823864 | 174 | 0.705622 | false |
arnaudsj/Petrel | petrel/petrel/package.py | 1 | 11356 | import os
import sys
import shutil
import getpass
import socket
import zipfile
import glob
import pkg_resources
from itertools import chain
from cStringIO import StringIO
from emitter import EmitterBase
from topologybuilder import TopologyBuilder
from util import read_yaml
MANIFEST = 'manifest.txt'
def add_to_jar(jar, name, data):
path = 'resources/%s' % name
print 'Adding %s' % path
jar.writestr(path, data)
def add_file_to_jar(jar, directory, script=None, required=True):
if script is not None:
path = os.path.join(directory, script)
else:
path = directory
# Use glob() to allow for wildcards, e.g. in manifest.txt.
path_list = glob.glob(path)
if len(path_list) == 0 and required:
raise ValueError('No files found matching: %s' % path)
#elif len(path_list) > 1:
# raise ValueError("Wildcard '%s' matches multiple files: %s" % (path, ', '.join(path_list)))
for this_path in path_list:
with open(this_path, 'r') as f:
# Assumption: Drop the path when adding to the jar.
add_to_jar(jar, os.path.basename(this_path), f.read())
def build_jar(source_jar_path, dest_jar_path, config, venv=None, definition=None, logdir=None):
"""Build a StormTopology .jar which encapsulates the topology defined in
topology_dir. Optionally override the module and function names. This
feature supports the definition of multiple topologies in a single
directory."""
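    # Illustrative call only (the jar paths, config file name, definition and log
    # directory below are hypothetical placeholders, not values required by Petrel):
    #     build_jar('jvmpetrel.jar', 'mytopology.jar', 'topology.yaml',
    #               venv=None, definition='create.create', logdir='/tmp')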
if definition is None:
definition = 'create.create'
# Prepare data we'll use later for configuring parallelism.
config_yaml = read_yaml(config)
parallelism = dict((k.split('.')[-1], v) for k, v in config_yaml.iteritems()
if k.startswith('petrel.parallelism'))
pip_options = config_yaml.get('petrel.pip_options', '')
module_name, dummy, function_name = definition.rpartition('.')
topology_dir = os.getcwd()
# Make a copy of the input "jvmpetrel" jar. This jar acts as a generic
# starting point for all Petrel topologies.
source_jar_path = os.path.abspath(source_jar_path)
dest_jar_path = os.path.abspath(dest_jar_path)
if source_jar_path == dest_jar_path:
raise ValueError("Error: Destination and source path are the same.")
shutil.copy(source_jar_path, dest_jar_path)
jar = zipfile.ZipFile(dest_jar_path, 'a', compression=zipfile.ZIP_DEFLATED)
added_path_entry = False
try:
# Add the files listed in manifest.txt to the jar.
with open(os.path.join(topology_dir, MANIFEST), 'r') as f:
for fn in f.readlines():
# Ignore blank and comment lines.
fn = fn.strip()
if len(fn) and not fn.startswith('#'):
add_file_to_jar(jar, os.path.expandvars(fn.strip()))
# Add user and machine information to the jar.
add_to_jar(jar, '__submitter__.yaml', '''
petrel.user: %s
petrel.host: %s
''' % (getpass.getuser(),socket.gethostname()))
# Also add the topology configuration to the jar.
with open(config, 'r') as f:
config_text = f.read()
add_to_jar(jar, '__topology__.yaml', config_text)
# Call module_name/function_name to populate a Thrift topology object.
builder = TopologyBuilder()
module_dir = os.path.abspath(topology_dir)
if module_dir not in sys.path:
sys.path[:0] = [ module_dir ]
added_path_entry = True
module = __import__(module_name)
getattr(module, function_name)(builder)
# Add the spout and bolt Python scripts to the jar. Create a
# setup_<script>.sh for each Python script.
# Add Python scripts and any other per-script resources.
for k, v in chain(builder._spouts.iteritems(), builder._bolts.iteritems()):
add_file_to_jar(jar, topology_dir, v.script)
# Create a bootstrap script.
if venv is not None:
# Allow overriding the execution command from the "petrel"
# command line. This is handy if the server already has a
# virtualenv set up with the necessary libraries.
v.execution_command = os.path.join(venv, 'bin/python')
# If a parallelism value was specified in the configuration YAML,
# override any setting provided in the topology definition script.
if k in parallelism:
builder._commons[k].parallelism_hint = int(parallelism.pop(k))
v.execution_command, v.script = \
intercept(venv, v.execution_command, os.path.splitext(v.script)[0],
jar, pip_options, logdir)
if len(parallelism):
raise ValueError(
'Parallelism settings error: There are no components named: %s' %
','.join(parallelism.keys()))
# Build the Thrift topology object and serialize it to the .jar. Must do
# this *after* the intercept step above since that step may modify the
# topology definition.
io = StringIO()
topology = builder.write(io)
add_to_jar(jar, 'topology.ser', io.getvalue())
finally:
jar.close()
if added_path_entry:
# Undo our sys.path change.
sys.path[:] = sys.path[1:]
def intercept(venv, execution_command, script, jar, pip_options, logdir):
#create_virtualenv = 1 if execution_command == EmitterBase.DEFAULT_PYTHON else 0
create_virtualenv = 1 if venv is None else 0
script_base_name = os.path.splitext(script)[0]
intercept_script = 'setup_%s.sh' % script_base_name
# Bootstrap script that sets up the worker's Python environment.
add_to_jar(jar, intercept_script, '''#!/bin/bash
set -e
SCRIPT=%(script)s
LOG=%(logdir)s/petrel$$_$SCRIPT.log
VENV_LOG=%(logdir)s/petrel$$_virtualenv.log
echo "Beginning task setup" >>$LOG 2>&1
# I've seen Storm Supervisor crash repeatedly if we create any new
# subdirectories (e.g. Python virtualenvs) in the worker's "resources" (i.e.
# startup) directory. So we put new directories in /tmp. It seems okay to create
# individual files though, e.g. the log.
PYVER=%(major)d.%(minor)d
CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Should we also allow dots in topology names?
TOPOLOGY_ID_REGEX="([+A-Za-z0-9_\-]+)/resources$"
[[ $CWDIR =~ $TOPOLOGY_ID_REGEX ]] && TOPOLOGY_ID="${BASH_REMATCH[1]}"
WRKDIR=/tmp/petrel-$TOPOLOGY_ID
VENV=%(venv)s
CREATE_VENV=%(create_virtualenv)d
START=$SECONDS
mkdir -p $WRKDIR/egg_cache >>$LOG 2>&1
export PYTHON_EGG_CACHE=$WRKDIR/egg_cache
set +e
python$PYVER -c "print" >>/dev/null 2>&1
RETVAL=$?
set -e
if [ $RETVAL -ne 0 ]; then
# If desired Python is not found, run the user's .bashrc. Maybe it will
# add the desired Python to the path.
source ~/.bashrc >>$LOG 2>&1
fi
# Now the desired Python *must* be available. This line ensures we detect the
# error and fail before continuing.
python$PYVER -c "print" >>$LOG 2>&1
unamestr=`uname`
if [[ "$unamestr" != 'Darwin' ]]; then
# Create at most ONE virtualenv for the topology. Put the lock file in /tmp
# because when running Storm in local mode, each task runs in a different
# subdirectory. Thus they have different lock files but are creating the same
# virtualenv. This causes multiple tasks to get into the lock before the
# virtualenv has all the libraries installed.
set +e
which flock >>$LOG 2>&1
has_flock=$?
set -e
LOCKFILE="/tmp/petrel-$TOPOLOGY_ID.lock"
LOCKFD=99
# PRIVATE
_lock() { flock -$1 $LOCKFD; }
_no_more_locking() { _lock u; _lock xn && rm -f $LOCKFILE; }
_prepare_locking() { eval "exec $LOCKFD>\\"$LOCKFILE\\""; trap _no_more_locking EXIT; }
# ON START
if [ "$has_flock" -eq "0" ]
then
_prepare_locking
fi
# PUBLIC
exlock_now() { _lock xn; } # obtain an exclusive lock immediately or fail
exlock() { _lock x; } # obtain an exclusive lock
shlock() { _lock s; } # obtain a shared lock
unlock() { _lock u; } # drop a lock
if [ $CREATE_VENV -ne 0 ]; then
# On Mac OS X, the "flock" command is not available
create_new=1
if [ "$has_flock" -eq "0" ]
then
if [ -d $VENV ];then
echo "Using existing venv: $VENV" >>$LOG 2>&1
shlock
source $VENV/bin/activate >>$LOG 2>&1
unlock
create_new=0
elif ! exlock_now;then
echo "Using existing venv: $VENV" >>$LOG 2>&1
shlock
source $VENV/bin/activate >>$LOG 2>&1
unlock
create_new=0
fi
fi
if [ "$create_new" -eq "1" ]
then
echo "Creating new venv: $VENV" >>$LOG 2>&1
virtualenv --system-site-packages --python python$PYVER $VENV >>$VENV_LOG 2>&1
source $VENV/bin/activate >>$VENV_LOG 2>&1
# Ensure the version of Thrift on the worker matches our version.
# This may not matter since Petrel only uses Thrift for topology build
# and submission, but I've had some odd version problems with Thrift
# and Storm/Java so I want to be safe.
for f in simplejson==2.6.1 thrift==%(thrift_version)s PyYAML==3.10
do
echo "Installing $f" >>$VENV_LOG 2>&1
pip install %(pip_options)s $f >>$VENV_LOG 2>&1
done
easy_install petrel-*-py$PYVER.egg >>$VENV_LOG 2>&1
if [ -f ./setup.sh ]; then
/bin/bash ./setup.sh $CREATE_VENV >>$VENV_LOG 2>&1
fi
if [ "$has_flock" -eq "0" ]
then
unlock
fi
fi
else
# This is a prototype feature where the topology specifies a virtualenv
# that already exists. Could be useful in some cases, since this means the
# topology is up and running more quickly.
if ! exlock_now;then
echo "Using existing venv: $VENV" >>$LOG 2>&1
shlock
source $VENV/bin/activate >>$LOG 2>&1
unlock
else
echo "Updating pre-existing venv: $VENV" >>$LOG 2>&1
source $VENV/bin/activate >>$LOG 2>&1
easy_install -U petrel-*-py$PYVER.egg >>$VENV_LOG 2>&1
if [ -f ./setup.sh ]; then
/bin/bash ./setup.sh $CREATE_VENV >>$VENV_LOG 2>&1
fi
unlock
fi
fi
fi
ELAPSED=$(($SECONDS-$START))
echo "Task setup took $ELAPSED seconds" >>$LOG 2>&1
echo "Launching: python -m petrel.run $SCRIPT $LOG" >>$LOG 2>&1
# We use exec to avoid creating another process. Creating a second process is
# not only less efficient but also confuses the way Storm monitors processes.
exec python -m petrel.run $SCRIPT $LOG
''' % dict(
major=sys.version_info.major,
minor=sys.version_info.minor,
script=script,
venv='$WRKDIR/venv' if venv is None else venv,
logdir='$PWD' if logdir is None else logdir,
create_virtualenv=create_virtualenv,
thrift_version=pkg_resources.get_distribution("thrift").version,
pip_options=pip_options,
))
return '/bin/bash', intercept_script
| bsd-3-clause | 4,556,587,947,054,720,000 | 37.23569 | 100 | 0.609193 | false |
sserrot/champion_relationships | venv/Lib/site-packages/networkx/algorithms/vitality.py | 1 | 2618 | # Copyright (C) 2010 by
# Aric Hagberg ([email protected])
# Renato Fabbri
# Copyright (C) 2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# Copyright (C) 2016-2019 by NetworkX developers.
#
# All rights reserved.
# BSD license.
"""
Vitality measures.
"""
from functools import partial
import networkx as nx
__all__ = ['closeness_vitality']
def closeness_vitality(G, node=None, weight=None, wiener_index=None):
"""Returns the closeness vitality for nodes in the graph.
The *closeness vitality* of a node, defined in Section 3.6.2 of [1],
is the change in the sum of distances between all node pairs when
excluding that node.
Parameters
----------
G : NetworkX graph
A strongly-connected graph.
weight : string
The name of the edge attribute used as weight. This is passed
directly to the :func:`~networkx.wiener_index` function.
node : object
If specified, only the closeness vitality for this node will be
returned. Otherwise, a dictionary mapping each node to its
closeness vitality will be returned.
Other parameters
----------------
wiener_index : number
If you have already computed the Wiener index of the graph
`G`, you can provide that value here. Otherwise, it will be
computed for you.
Returns
-------
dictionary or float
If `node` is None, this function returns a dictionary
with nodes as keys and closeness vitality as the
value. Otherwise, it returns only the closeness vitality for the
specified `node`.
The closeness vitality of a node may be negative infinity if
removing that node would disconnect the graph.
Examples
--------
>>> G = nx.cycle_graph(3)
>>> nx.closeness_vitality(G)
{0: 2.0, 1: 2.0, 2: 2.0}
See Also
--------
closeness_centrality
References
----------
.. [1] Ulrik Brandes, Thomas Erlebach (eds.).
*Network Analysis: Methodological Foundations*.
Springer, 2005.
<http://books.google.com/books?id=TTNhSm7HYrIC>
"""
if wiener_index is None:
wiener_index = nx.wiener_index(G, weight=weight)
if node is not None:
after = nx.wiener_index(G.subgraph(set(G) - {node}), weight=weight)
return wiener_index - after
vitality = partial(closeness_vitality, G, weight=weight,
wiener_index=wiener_index)
# TODO This can be trivially parallelized.
return {v: vitality(node=v) for v in G}
| mit | -437,198,766,088,023,200 | 29.091954 | 75 | 0.638655 | false |
certik/pyjamas | examples/slideshow/Slideshow.py | 5 | 3778 | import pyjd
from pyjamas.ui.Button import Button
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui import HasAlignment
from pyjamas.ui.Hyperlink import Hyperlink
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas import Window
from SinkList import SinkList
from pyjamas import History
import Slide
from pyjamas.HTTPRequest import HTTPRequest
from SlideLoader import SlideListLoader
from pyjamas.Timer import Timer
from pyjamas.ui.Button import Button
from pyjamas import DOM
class Slideshow:
def onHistoryChanged(self, token):
info = self.sink_list.find(token)
if info:
self.show(info, False)
else:
self.showInfo()
def onModuleLoad(self):
self.curInfo=''
self.curSink=None
self.description=HTML()
self.sink_list=SinkList()
self.panel=DockPanel()
self.b=Button("load", self)
self.sinkContainer = DockPanel()
self.sinkContainer.setStyleName("ks-Sink")
height = Window.getClientHeight()
self.sp = ScrollPanel(self.sinkContainer)
self.sp.setWidth("100%")
self.sp.setHeight("%dpx" % (height-110))
vp=VerticalPanel()
vp.setWidth("100%")
vp.setHeight("100%")
vp.add(self.description)
vp.add(self.sp)
self.description.setStyleName("ks-Intro")
self.panel.add(self.sink_list, DockPanel.WEST)
self.panel.add(vp, DockPanel.CENTER)
self.panel.setCellVerticalAlignment(self.sink_list, HasAlignment.ALIGN_TOP)
self.panel.setCellWidth(vp, "100%")
self.panel.setCellHeight(vp, "100%")
Window.addWindowResizeListener(self)
History.addHistoryListener(self)
RootPanel().add(self.panel)
RootPanel().add(self.b)
self.loadSinks()
def onClick(self, sender):
self.loadSinks()
def onTimer(self, tid):
self.loadSinks()
def onWindowResized(self, width, height):
self.sink_list.resize(width, height)
self.sp.setHeight("%dpx" % (height-110))
def show(self, info, affectHistory):
if info == self.curInfo: return
self.curInfo = info
#Logger.write("showing " + info.getName())
if self.curSink <> None:
self.curSink.onHide()
#Logger.write("removing " + self.curSink)
self.sinkContainer.remove(self.curSink)
self.curSink = info.getInstance()
self.sink_list.setSinkSelection(info.getName())
self.description.setHTML(info.getDescription())
if (affectHistory):
History.newItem(info.getName())
self.sinkContainer.add(self.curSink, DockPanel.CENTER)
self.sinkContainer.setCellWidth(self.curSink, "100%")
self.sinkContainer.setCellHeight(self.curSink, "100%")
self.sinkContainer.setCellVerticalAlignment(self.curSink, HasAlignment.ALIGN_TOP)
self.curSink.onShow()
def loadSinks(self):
HTTPRequest().asyncGet("slides.txt", SlideListLoader(self))
def setSlides(self, slides):
for l in slides:
name = l[0]
desc = l[1]
self.sink_list.addSink(Slide.init(name, desc))
#Show the initial screen.
initToken = History.getToken()
if len(initToken):
self.onHistoryChanged(initToken)
else:
self.showInfo()
def showInfo(self):
self.show(self.sink_list.sinks[0], False)
if __name__ == '__main__':
pyjd.setup("http://127.0.0.1/examples/slideshow/public/Slideshow.html")
app = Slideshow()
app.onModuleLoad()
pyjd.run()
| apache-2.0 | -8,968,549,927,613,880,000 | 27.839695 | 89 | 0.644521 | false |
C2SM-RCM/CCLM2CMOR | src/CMORlight/init_log.py | 1 | 1676 | """
Sets up custom logger
"""
import logging
def setup_custom_logger(name,filename='/dev/null',propagate=False,normal_log=True,verbose_log=False,append_log=False):
"""
Sets up custom logger and returns it
Parameters
----------
name : str
Name of the logger with which it can be called
filename : str
        absolute path of the log file
    propagate : bool
        whether logged information should also be propagated to standard output
    normal_log : bool
        if True, the logger gets logging level INFO; if False, the level is WARNING (unless verbose_log == True)
    verbose_log : bool
        if True, the logger gets logging level DEBUG
    append_log : bool
        if True, logged information is appended to the file; if False, the file is overwritten
Returns
-------
log : logging.Logger
Custom logger
"""
if verbose_log:
level=logging.DEBUG
elif normal_log:
level=logging.INFO
else:
level=logging.WARNING
if append_log:
mode='a'
else:
mode='w'
formatter = logging.Formatter(fmt='%(levelname)s: %(message)s')
# formatter = logging.Formatter(fmt='%(asctime)s %(module)s %(levelname)s: %(message)s',datefmt='%H:%M:%S')
#formatter = logging.Formatter(fmt='%(asctime)s %(process)s %(levelname)s:%(message)s')
log = logging.getLogger(name)
log.setLevel(level)
logging.addLevelName(35,"")
fh = logging.FileHandler(filename,mode=mode)
fh.setFormatter(formatter)
log.addHandler(fh)
if propagate:
ch = logging.StreamHandler()
ch.setFormatter(formatter)
log.addHandler(ch)
return log
| gpl-3.0 | -7,992,626,526,875,689,000 | 26.032258 | 118 | 0.637828 | false |
akosel/servo | python/servo/testing_commands.py | 1 | 17196 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
import argparse
import sys
import os
import os.path as path
import subprocess
from collections import OrderedDict
from distutils.spawn import find_executable
from time import time
from mach.registrar import Registrar
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase
from wptrunner import wptcommandline
from update import updatecommandline
import tidy
@CommandProvider
class MachCommands(CommandBase):
DEFAULT_RENDER_MODE = "cpu"
HELP_RENDER_MODE = "Value can be 'cpu', 'gpu' or 'both' (default " + DEFAULT_RENDER_MODE + ")"
def __init__(self, context):
CommandBase.__init__(self, context)
if not hasattr(self.context, "built_tests"):
self.context.built_tests = False
def ensure_built_tests(self, release=False):
if self.context.built_tests:
return
returncode = Registrar.dispatch(
'build-tests', context=self.context, release=release)
if returncode:
sys.exit(returncode)
self.context.built_tests = True
def find_test(self, prefix, release=False):
build_mode = "release" if release else "debug"
target_contents = os.listdir(path.join(
self.get_target_dir(), build_mode))
for filename in target_contents:
if filename.startswith(prefix + "-"):
filepath = path.join(
self.get_target_dir(), build_mode, filename)
if path.isfile(filepath) and os.access(filepath, os.X_OK):
return filepath
def run_test(self, prefix, args=[], release=False):
t = self.find_test(prefix, release=release)
if t:
return subprocess.call([t] + args, env=self.build_env())
@Command('test',
description='Run all Servo tests',
category='testing')
@CommandArgument('params', default=None, nargs="...",
help="Optionally select test based on "
"test file directory")
@CommandArgument('--render-mode', '-rm', default=DEFAULT_RENDER_MODE,
help="The render mode to be used on all tests. " +
HELP_RENDER_MODE)
@CommandArgument('--release', default=False, action="store_true",
help="Run with a release build of servo")
def test(self, params, render_mode=DEFAULT_RENDER_MODE, release=False):
suites = OrderedDict([
("tidy", {}),
("ref", {"kwargs": {"kind": render_mode},
"path": path.abspath(path.join("tests", "ref")),
"include_arg": "name"}),
("wpt", {"kwargs": {"release": release},
"path": path.abspath(path.join("tests", "wpt", "web-platform-tests")),
"include_arg": "include"}),
("css", {"kwargs": {"release": release},
"path": path.abspath(path.join("tests", "wpt", "css-tests")),
"include_arg": "include"}),
("unit", {}),
])
suites_by_prefix = {v["path"]: k for k, v in suites.iteritems() if "path" in v}
selected_suites = OrderedDict()
if params is None:
params = suites.keys()
for arg in params:
found = False
if arg in suites and arg not in selected_suites:
selected_suites[arg] = []
found = True
elif os.path.exists(path.abspath(arg)):
abs_path = path.abspath(arg)
for prefix, suite in suites_by_prefix.iteritems():
if abs_path.startswith(prefix):
if suite not in selected_suites:
selected_suites[suite] = []
selected_suites[suite].append(arg)
found = True
break
if not found:
print("%s is not a valid test path or suite name" % arg)
return 1
test_start = time()
for suite, tests in selected_suites.iteritems():
props = suites[suite]
kwargs = props.get("kwargs", {})
if tests:
kwargs[props["include_arg"]] = tests
Registrar.dispatch("test-%s" % suite, context=self.context, **kwargs)
elapsed = time() - test_start
print("Tests completed in %0.2fs" % elapsed)
@Command('test-unit',
description='Run unit tests',
category='testing')
@CommandArgument('--package', '-p', default=None, help="Specific package to test")
@CommandArgument('test_name', nargs=argparse.REMAINDER,
help="Only run tests that match this pattern")
def test_unit(self, test_name=None, package=None):
if test_name is None:
test_name = []
self.ensure_bootstrapped()
if package:
packages = [package]
else:
packages = os.listdir(path.join(self.context.topdir, "tests", "unit"))
for crate in packages:
result = subprocess.call(
["cargo", "test", "-p", "%s_tests" % crate] + test_name,
env=self.build_env(), cwd=self.servo_crate())
if result != 0:
return result
@Command('test-ref',
description='Run the reference tests',
category='testing')
@CommandArgument('--kind', '-k', default=DEFAULT_RENDER_MODE,
help=HELP_RENDER_MODE)
@CommandArgument('--release', '-r', action='store_true',
help='Run with a release build of Servo')
@CommandArgument('--name', default=None,
help="Only run tests that match this pattern. If the "
"path to the ref test directory is included, it "
"will automatically be trimmed out.")
@CommandArgument(
'servo_params', default=None, nargs=argparse.REMAINDER,
help="Command-line arguments to be passed through to Servo")
def test_ref(self, kind=DEFAULT_RENDER_MODE, name=None, servo_params=None,
release=False):
self.ensure_bootstrapped()
self.ensure_built_tests(release=release)
assert kind is not None, 'kind cannot be None, see help'
kinds = ["cpu", "gpu"] if kind == 'both' else [kind]
test_path = path.join(self.context.topdir, "tests", "ref")
error = False
test_start = time()
for k in kinds:
print("Running %s reftests..." % k)
test_args = [k, test_path]
if name is not None:
maybe_path = path.normpath(name)
ref_path = path.join("tests", "ref")
# Check to see if we were passed something leading with the
# path to the ref test directory, and trim it so that reftest
# knows how to filter it.
if ref_path in maybe_path:
test_args.append(path.relpath(maybe_path, ref_path))
else:
test_args.append(name)
if servo_params is not None:
test_args += ["--"] + servo_params
ret = self.run_test("reftest", test_args, release=release)
error = error or ret != 0
elapsed = time() - test_start
print("Reference tests completed in %0.2fs" % elapsed)
if error:
return 1
@Command('test-devtools',
description='Run the devtools tests',
category='testing')
@CommandArgument('--name', default=None,
help="Only run tests that match this pattern. If the "
"path to the ref test directory is included, it "
"will automatically be trimmed out.")
def test_devtools(self, name=None):
self.ensure_bootstrapped()
self.ensure_built_tests()
error = False
test_start = time()
ret = self.run_test("devtools_test")
error = error or ret != 0
elapsed = time() - test_start
print("Devtools tests completed in %0.2fs" % elapsed)
if error:
return 1
@Command('test-content',
description='Run the content tests',
category='testing')
def test_content(self):
print("Content tests have been replaced by web-platform-tests under "
"tests/wpt/mozilla/.")
return 0
@Command('test-tidy',
description='Run the source code tidiness check',
category='testing')
def test_tidy(self):
return tidy.scan()
@Command('test-wpt-failure',
description='Run the web platform tests',
category='testing')
def test_wpt_failure(self):
self.ensure_bootstrapped()
return not subprocess.call([
"bash",
path.join("tests", "wpt", "run.sh"),
"--no-pause-after-test",
"--include",
"infrastructure/failing-test.html"
], env=self.build_env())
@Command('test-wpt',
description='Run the web platform tests',
category='testing',
parser=wptcommandline.create_parser)
@CommandArgument('--release', default=False, action="store_true",
help="Run with a release build of servo")
def test_wpt(self, **kwargs):
self.ensure_bootstrapped()
self.ensure_wpt_virtualenv()
hosts_file_path = path.join(self.context.topdir, 'tests', 'wpt', 'hosts')
os.environ["hosts_file_path"] = hosts_file_path
kwargs["debug"] = not kwargs["release"]
run_file = path.abspath(path.join(self.context.topdir, "tests", "wpt", "run_wpt.py"))
run_globals = {"__file__": run_file}
execfile(run_file, run_globals)
return run_globals["run_tests"](**kwargs)
@Command('update-wpt',
description='Update the web platform tests',
category='testing',
parser=updatecommandline.create_parser())
def update_wpt(self, **kwargs):
self.ensure_bootstrapped()
self.ensure_wpt_virtualenv()
run_file = path.abspath(path.join("tests", "wpt", "update.py"))
run_globals = {"__file__": run_file}
execfile(run_file, run_globals)
return run_globals["update_tests"](**kwargs)
@Command('test-jquery',
description='Run the jQuery test suite',
category='testing')
@CommandArgument('--release', '-r', action='store_true',
help='Run the release build')
@CommandArgument('--dev', '-d', action='store_true',
help='Run the dev build')
def test_jquery(self, release, dev):
return self.jquery_test_runner("test", release, dev)
@Command('test-dromaeo',
description='Run the Dromaeo test suite',
category='testing')
@CommandArgument('tests', default=["recommended"], nargs="...",
help="Specific tests to run")
@CommandArgument('--release', '-r', action='store_true',
help='Run the release build')
@CommandArgument('--dev', '-d', action='store_true',
help='Run the dev build')
def test_dromaeo(self, tests, release, dev):
return self.dromaeo_test_runner(tests, release, dev)
@Command('update-jquery',
description='Update the jQuery test suite expected results',
category='testing')
@CommandArgument('--release', '-r', action='store_true',
help='Run the release build')
@CommandArgument('--dev', '-d', action='store_true',
help='Run the dev build')
def update_jquery(self, release, dev):
return self.jquery_test_runner("update", release, dev)
@Command('test-css',
description='Run the web platform tests',
category='testing',
parser=wptcommandline.create_parser())
@CommandArgument('--release', default=False, action="store_true",
help="Run with a release build of servo")
def test_css(self, **kwargs):
self.ensure_bootstrapped()
self.ensure_wpt_virtualenv()
run_file = path.abspath(path.join("tests", "wpt", "run_css.py"))
run_globals = {"__file__": run_file}
execfile(run_file, run_globals)
return run_globals["run_tests"](**kwargs)
@Command('update-css',
description='Update the web platform tests',
category='testing',
parser=updatecommandline.create_parser())
def update_css(self, **kwargs):
self.ensure_bootstrapped()
self.ensure_wpt_virtualenv()
run_file = path.abspath(path.join("tests", "wpt", "update_css.py"))
run_globals = {"__file__": run_file}
execfile(run_file, run_globals)
return run_globals["update_tests"](**kwargs)
def ensure_wpt_virtualenv(self):
virtualenv_path = path.join(self.context.topdir, "tests", "wpt", "_virtualenv")
python = self.get_exec("python2", "python")
if not os.path.exists(virtualenv_path):
virtualenv = self.get_exec("virtualenv2", "virtualenv")
subprocess.check_call([virtualenv, "-p", python, virtualenv_path])
activate_path = path.join(virtualenv_path, "bin", "activate_this.py")
execfile(activate_path, dict(__file__=activate_path))
try:
import wptrunner # noqa
from wptrunner.browsers import servo # noqa
except ImportError:
subprocess.check_call(["pip", "install", "-r",
path.join(self.context.topdir, "tests", "wpt",
"harness", "requirements.txt")])
subprocess.check_call(["pip", "install", "-r",
path.join(self.context.topdir, "tests", "wpt",
"harness", "requirements_servo.txt")])
try:
import blessings
except ImportError:
subprocess.check_call(["pip", "install", "blessings"])
# This is an unfortunate hack. Because mozlog gets imported by wptcommandline
        # before the virtualenv is initialised, it doesn't see the blessings module, so we don't
# get coloured output. Setting the blessings global explicitly fixes that.
from mozlog.structured.formatters import machformatter
import blessings # noqa
machformatter.blessings = blessings
def get_exec(self, name, default=None):
path = find_executable(name)
if not path:
return default
return path
def jquery_test_runner(self, cmd, release, dev):
self.ensure_bootstrapped()
base_dir = path.abspath(path.join("tests", "jquery"))
jquery_dir = path.join(base_dir, "jquery")
run_file = path.join(base_dir, "run_jquery.py")
# Clone the jQuery repository if it doesn't exist
if not os.path.isdir(jquery_dir):
subprocess.check_call(
["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/servo/jquery", jquery_dir])
# Run pull in case the jQuery repo was updated since last test run
subprocess.check_call(
["git", "-C", jquery_dir, "pull"])
# Check that a release servo build exists
bin_path = path.abspath(self.get_binary_path(release, dev))
return subprocess.check_call(
[run_file, cmd, bin_path, base_dir])
def dromaeo_test_runner(self, tests, release, dev):
self.ensure_bootstrapped()
base_dir = path.abspath(path.join("tests", "dromaeo"))
dromaeo_dir = path.join(base_dir, "dromaeo")
run_file = path.join(base_dir, "run_dromaeo.py")
# Clone the Dromaeo repository if it doesn't exist
if not os.path.isdir(dromaeo_dir):
subprocess.check_call(
["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/notriddle/dromaeo", dromaeo_dir])
# Run pull in case the Dromaeo repo was updated since last test run
subprocess.check_call(
["git", "-C", dromaeo_dir, "pull"])
# Compile test suite
subprocess.check_call(
["make", "-C", dromaeo_dir, "web"])
# Check that a release servo build exists
bin_path = path.abspath(self.get_binary_path(release, dev))
return subprocess.check_call(
[run_file, "|".join(tests), bin_path, base_dir])
| mpl-2.0 | 7,995,225,851,598,871,000 | 38.531034 | 117 | 0.567748 | false |
unixhot/opencmdb | util/fields_validation.py | 1 | 22384 | # coding=utf-8
import datetime
from item.models import ItemCategory, Item
import re
from jsonschema import validate
import json
def validate_item_field(attr_value, attr_form):
"""
    :param attr_value: the attribute value of the item
    :param attr_form: the attribute rule defined by the item category
:return:
"""
if not isinstance(attr_form, dict):
return -1, {"error": "attr_form is not a dict."}
required = attr_form.get('required')
if required == 'false':
return 0, {"msg": "success"}
field = attr_form.get('field')
if not field:
return -1, {"error": "field missed."}
if field == "string":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a string."}
if len(attr_value) < int(attr_form["min_length"]) or len(attr_value) > int(attr_form["max_length"]):
return -1, {"error": "invalid string length."}
if attr_form.get('valid_rule') == "none":
return 0, {"msg": "success"}
elif attr_form.get('valid_rule') == "IPaddress":
            pattern = re.compile(r'\d+\.\d+\.\d+\.\d+') # IP address matching could be improved
elif attr_form.get('valid_rule') == "email":
pattern = re.compile(r'^(\w)+(\.\w+)*@(\w)+((\.\w+)+)$')
elif attr_form.get('valid_rule') == "phone":
pattern = re.compile(r'^\d{11}$')
else:
return -1, {"error": "invalid valid_rule."}
match = pattern.match(attr_value)
if not match:
return -1, {"error": "did not match rule: %s" % attr_form.get('valid_rule')}
elif field == "text":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a string."}
if len(attr_value) < int(attr_form["min_length"]) or len(attr_value) > int(attr_form["max_length"]):
return -1, {"error": "invalid string length."}
elif field == "select":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a dict."}
if attr_value not in attr_form["choice"][1:-1].split("|"):
return -1, {"error": "invalid choice."}
elif field == "multiple_select":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a dict."}
for each in attr_value.split("|"):
if each not in attr_form["choice"][1:-1].split("|"):
return -1, {"error": "invalid choice."}
elif field == "integer":
if not isinstance(attr_value, int):
return -1, {"error": "attr_value is not a integer."}
if attr_value < int(attr_form["min_value"]) or attr_value > int(attr_form["max_value"]):
return -1, {"error": "invalid integer value."}
elif field == "datetime":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a string."}
try:
date_object = datetime.datetime.strptime(
attr_value, '%Y%m%d%H%M%S')
except ValueError:
return -1, {"error": "time data '%s' does not match format" % attr_value}
elif field == "reference":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a string."}
item_obj = Item.objects(id=attr_value)
if not item_obj:
return -1, {"error": "unknown item."}
if item_obj.category.id != attr_form["reference"]:
return -1, {"error": "wrong category."}
return 0, {"msg": "success"}
_fields_comment = {
'string': "单行文本",
'text': "多行文本",
'select': "单选",
'multi_select': "多选",
'image': "图片",
'number': "数值",
'datetime': "日期时间",
'required': "是否必填",
'name': "字段名称",
'default': "默认",
'max': "最大/最长",
'min': "最小/最短",
'unit': "单位",
'choice': "选项(以|分割)",
'field': "字段类型",
'key': "关键词",
'ref': "模型引用",
'reference': "模型引用",
# 'reference': {
# "required": r'^(True)|(False)$',
# "reference": r'^\w{24}$',
    # }, # reference
# 'image': {
# "required": r'^(True)|(False)$',
    # }, # image
}
# json_schema
_valid_fields = {
'string': {
"required": True,
"type": "object",
"id": "id",
"properties": {
"field": {
"required": True,
"type": "string",
"id": "field"
},
"required": {
"required": True,
"type": "boolean",
"id": "required"
},
"name": {
"required": True,
"type": "string",
"id": "name"
},
"key": {
"required": True,
"type": "string",
"id": "key"
},
"default":
{
"required": True,
"type": "string",
"id": "default",
"maxLength": 100000,
"minLength": 0,
},
"max": {
"required": True,
"type": "number",
"id": "max",
"minimum": 0
},
"min": {
"required": True,
"type": "number",
"id": "max",
"maximum": 100000000
},
}
},
'text': {
"required": True,
"type": "object",
"id": "id",
"properties": {
"field": {
"required": True,
"type": "string",
"id": "field"
},
"required": {
"required": True,
"type": "boolean",
"id": "required"
},
"name": {
"required": True,
"type": "string",
"id": "name"
},
"key": {
"required": True,
"type": "string",
"id": "key"
},
"default":
{
"required": True,
"type": "string",
"id": "default"
},
}
},
'number': {
"required": True,
"type": "object",
"id": "id",
"properties": {
"field": {
"required": True,
"type": "string",
"id": "field"
},
"required": {
"required": True,
"type": "boolean",
"id": "required"
},
"name": {
"required": True,
"type": "string",
"id": "name"
},
"key": {
"required": True,
"type": "string",
"id": "key"
},
"max": {
"required": True,
"type": "number",
"id": "max",
"minimum": 0
},
"min": {
"required": True,
"type": "number",
"id": "max",
"maximum": 100000000
},
"unit": {
"required": True,
"type": "string",
"id": "unit"
},
"default":
{
"required": True,
"type": "number",
"id": "default",
"maximum": 100000000,
"minimum": 0
},
}
},
"select": {
"required": True,
"type": "object",
"id": "machine_type",
"properties": {
"field": {
"required": True,
"type": "string",
"id": "field"
},
"required": {
"required": True,
"type": "boolean",
"id": "required"
},
"name": {
"required": True,
"type": "string",
"id": "name"
},
"key": {
"required": True,
"type": "string",
"id": "key"
},
"choice": {
"required": True,
"type": "string",
"id": "choice",
"pattern": r'^(.+|)*(.+)$'
}
}
},
"multi_select": {
"required": True,
"type": "object",
"id": "machine_type",
"properties": {
"field": {
"required": True,
"type": "string",
"id": "field"
},
"required": {
"required": True,
"type": "boolean",
"id": "required"
},
"name": {
"required": True,
"type": "string",
"id": "name"
},
"key": {
"required": True,
"type": "string",
"id": "key"
},
"choice": {
"required": True,
"type": "string",
"id": "choice",
"pattern": r'^(.+|)*(.+)$'
}
}
},
"datetime": {
"required": True,
"type": "object",
"id": "machine_type",
"properties": {
"field": {
"required": True,
"type": "string",
"id": "field"
},
"required": {
"required": True,
"type": "boolean",
"id": "required"
},
"name": {
"required": True,
"type": "string",
"id": "name"
},
"key": {
"required": True,
"type": "string",
"id": "key"
},
# "choice": {
# "required": True,
# "type": "string",
# "id": "choice",
# "pattern":r'^(\d{2}|\d{4})(?:\-)?([0]{1}\d{1}|[1]{1}[0-2]{1})(?:\-)?([0-2]{1}\d{1}|[3]{1}[0-1]{1})(?:\s)?([0-1]{1}\d{1}|[2]{1}[0-3]{1})(?::)?([0-5]{1}\d{1})(?::)?([0-5]{1}\d{1})$'
# }
}
},
"image": {
"required": True,
"type": "object",
"id": "image",
"properties": {
"field": {
"required": True,
"type": "string",
"id": "field"
},
"required": {
"required": True,
"type": "boolean",
"id": "required"
},
"name": {
"required": True,
"type": "string",
"id": "name"
},
"key": {
"required": True,
"type": "string",
"id": "key"
},
# "choice": {
# "required": True,
# "type": "string",
# "id": "choice",
# "pattern":r'^(\d{2}|\d{4})(?:\-)?([0]{1}\d{1}|[1]{1}[0-2]{1})(?:\-)?([0-2]{1}\d{1}|[3]{1}[0-1]{1})(?:\s)?([0-1]{1}\d{1}|[2]{1}[0-3]{1})(?::)?([0-5]{1}\d{1})(?::)?([0-5]{1}\d{1})$'
# }
}
},
"ref": {
"required": True,
"type": "object",
"id": "image",
"properties": {
"field": {
"required": True,
"type": "string",
"id": "field"
},
"required": {
"required": True,
"type": "boolean",
"id": "required"
},
"name": {
"required": True,
"type": "string",
"id": "name"
},
"key": {
"required": True,
"type": "string",
"id": "key"
},
"reference": {
"required": True,
"type": "string",
"id": "key"
},
# "choice": {
# "required": True,
# "type": "string",
# "id": "choice",
# "pattern":r'^(\d{2}|\d{4})(?:\-)?([0]{1}\d{1}|[1]{1}[0-2]{1})(?:\-)?([0-2]{1}\d{1}|[3]{1}[0-1]{1})(?:\s)?([0-1]{1}\d{1}|[2]{1}[0-3]{1})(?::)?([0-5]{1}\d{1})(?::)?([0-5]{1}\d{1})$'
# }
}
},
}
_valid_item_fields = {
'string': {
"required": True,
"type": "string",
"id": "id",
"maxLength": 100000,
"minLength": 0,
},
'text': {
"required": True,
"type": "string",
"id": "id",
},
'number': {
"required": True,
"type": "string",
"id": "id",
"minimum": 0,
"maximum": 0,
},
"select": {
"required": True,
"type": "string",
"id": "machine_type",
},
"multi_select": {
"required": True,
"type": "string",
"id": "machine_type",
},
"datetime": {
"required": True,
"type": "string",
"id": "machine_type",
},
"image": {
"required": True,
"type": "string",
"id": "machine_type",
},
"ref": {
"required": True,
"type": "string",
"id": "machine_type",
},
}
def gemerating_category_schema(s_data):
json_schema = {
"$schema": "http://json-schema.org/draft-03/schema#",
"required": True,
# "type": "object",
"id": "#",
"properties": {}
}
for data in s_data:
attr_name = data.get("key")
if not isinstance(data, dict):
return -1, {"error": "Data is not a dict."}
field = data.get("field")
# print(attr_name)
# print(field)
if field not in _valid_fields:
return -1, {"error": "Unknown attribute filed type."}
schema_bean = _valid_fields[field]
schema_bean['id'] = attr_name
# print(schema_bean)
_max = data.get("max", -10000)
_min = data.get("min", -10000)
if not _max or not _min or _max < _min:
return -1, {"error": "%s maximum and mininum not correct." % (attr_name)}
if _max > 0:
schema_bean['properties']['min']['maximum'] = _max
            schema_bean['properties']['max']['minimum'] = _min
if field == "number":
schema_bean['properties']['default']['maximum'] = _max
                schema_bean['properties']['default']['minimum'] = _min
if field == "string":
schema_bean['properties']['default']['maxLength'] = _max
schema_bean['properties']['default']['minLength'] = _min
json_schema['properties'][attr_name] = schema_bean
return 0, json_schema
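# Illustrative use of the schema builder above (the field definition is hypothetical):
#     ok, schema = gemerating_category_schema([{"key": "ip", "name": "ip", "field": "string",
#                                               "required": True, "min": 1, "max": 32, "default": ""}])
#     if ok == 0:
#         validate(category_structure_data, schema)  # jsonschema.validate, imported above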
def gemerating_item_schema(s_data):
json_schema = {
"$schema": "http://json-schema.org/draft-03/schema#",
"required": True,
# "type": "object",
"id": "#",
"properties": {
"category": {
"required": True,
"type": "string",
"id": "category"
},
"name": {
"required": True,
"type": "string",
"id": "name"
},
"id": {
"required": False,
"type": "string",
"id": "id"
},
}
}
for data in s_data:
attr_name = data.get("key")
if not isinstance(data, dict):
return -1, {"error": "Data is not a dict."}
field = data.get("field")
if field not in _valid_item_fields:
return -1, {"error": "Unknown attribute filed type."}
schema_bean = _valid_item_fields[field]
schema_bean['id'] = attr_name
# print(schema_bean)
_max = data.get("max", -10000)
_min = data.get("min", -10000)
if not _max or not _min or _max < _min:
return -1, {"error": "%s maximum and mininum not correct." % (attr_name)}
if field == "number":
schema_bean['maximum'] = _max
            schema_bean['minimum'] = _min
if field == "string":
schema_bean['maxLength'] = _max
schema_bean['minLength'] = _min
json_schema[attr_name] = schema_bean
return 0, json_schema
from item.serializers import ItemCategorySerializer, ItemSerializer
def validate_item_structure(data):
"""
    :param data: the Item data to validate, as a dict
:return:
"""
if not isinstance(data, dict):
return -1, {"error": "Data is not a dict."}
category = data.get("category")
if not category:
return -1, {"error": "Category is not defined."}
item_category_object = ItemCategory.objects.get(id=category)
if not item_category_object:
return -1, {"error": "Category does not exists."}
serializer = ItemCategorySerializer(item_category_object)
struct = []
for k in serializer.data['structure']:
if k == 'hidden':
continue
for v in serializer.data['structure'][k]:
if len(v) > 0:
struct.append(v)
json_shema_res = gemerating_item_schema(struct)
if json_shema_res[0] < 0:
return -1, json_shema_res[1]
try:
# print(json_shema_res)
validate(data, json_shema_res[1])
except Exception as e:
print(e)
else:
print("json good")
return 0, {"msg": "success"}
for attr_group_name, attr_group_value in field_data.items():
if attr_group_name not in item_category_object.structure:
return -1, {"error": "Invaild attr_group:%s" % attr_group_name}
for attr_name, attr_value in attr_group_value.items():
if attr_name not in item_category_object.structure[attr_group_name]:
return -1, {"error": "Invaild attr:%s" % attr_name}
state, msg = validate_item_field(attr_value, item_category_object.structure[
attr_group_name][attr_name])
if state != 0:
return state, msg
return 0, {"msg": "success"}
def validate_category_structure(raw_data): # validate the category definition fields
structure = raw_data.get("structure")
if not structure:
return -1, {"error": "raw Data missed attr: structure"}
if not isinstance(structure, dict):
return -1, {"error": "structure is not a dict."}
for cpg_index in structure:
if cpg_index == "hidden":
continue
s_data = structure[cpg_index]
if not isinstance(s_data, list):
return -1, {"error": "s_data is not a dict."}
json_shema_res = gemerating_category_schema(s_data)
if json_shema_res[0] < 0:
return -1, json_shema_res[1]
try:
validate(s_data, json_shema_res[1])
except Exception as e:
print(e)
else:
print("json good")
for data in s_data:
attr_name = data.get("key")
field = data.get("field")
if field == 'reference':
item_category_obj = ItemCategory.objects(id=field["reference"])
if not item_category_obj:
return -1, {"error": "unknown reference."}
return 0, {"msg": "success"}
# structure = {
# "default": [
# {
# "key":"ip",
# "name": "ip地址",
# "field": "string",
# "min": 1,
# "max": 200,
# "default": "1000",
# "required": True
# },
# {
# "key":"cpu",
# "name": "cpu",
# "field": "string",
# "max": 200,
# "min": 1,
# "default": "1000",
# "required": True
# },
# {
# "key":"memory",
# "name": "内存",
# "field": "number",
# "min": 10,
# "max": 200,
# "required": True,
# "unit":"Gb",
# "default": 100,
# },
# {
# "key":"machine_type",
# "name": "机器类型",
# "field": "select",
# "required": True,
# "choice": "虚拟机|物理机|云主机",
# },
# {
# "key":"mul_test",
# "name": "机器类型",
# "field": "multi_select",
# "required": True,
# "choice": "test1|test2|test3",
# },
# {
# "key":"buytime",
# "name": "购买日期",
# "field": "datetime",
# "required": True,
# },
# ]
# }
# s_data = structure['default']
# json_shema = gemerating_category_schema(s_data)
# print(json.dumps(json_shema[1]))
# try:
# validate(s_data, json_shema[1])
# except Exception as e:
# print(e.message)
# else:
# print("json good")
# print(validate_category_structure({"structure":structure}))
s_data1 = {"name": "234", "id": "", "category": "58953a76cc8b7914090dea76",
"size": "1", "ip": "127.0.0.1", "port": "3307"}
s_data = [
{
"required": "",
"default": "1",
"name": "size",
"field": "number",
"min": 1,
"unit": "a",
"key": "size",
"max": 1
},
{
"required": "",
"default": "127.0.0.1",
"key": "ip",
"name": "ip",
"max": 20,
"field": "string",
"min": 1
},
{
"required": "",
"default": "3307",
"name": "端口",
"field": "number",
"min": 4,
"unit": "",
"key": "port",
"max": 8
}
]
json_shema = gemerating_item_schema(s_data)
# print(json.dumps(json_shema[1]))
try:
validate(s_data1, json_shema[1])
except Exception as e:
print(e.message)
else:
print("json good")
# print(validate_item_structure(s_data1))
| apache-2.0 | 8,529,370,498,095,866,000 | 27.857888 | 201 | 0.400786 | false |
ollie314/browserscope | static_mode/selectors_1.py | 9 | 25266 | (dp0
VCrazy Browser 3
p1
(dp2
S'summary_display'
p3
S'0%'
p4
sS'total_runs'
p5
L1L
sS'summary_score'
p6
I100
sS'results'
p7
(dp8
S'failed'
p9
(dp10
S'score'
p11
I50
sS'raw_score'
p12
L1816L
sS'display'
p13
S'1816'
p14
ssS'passed'
p15
(dp16
g11
I50
sg12
L0L
sg13
S'0'
p17
ssssVK-Meleon 1
p18
(dp19
S'summary_display'
p20
S'0%'
p21
sS'total_runs'
p22
L8L
sS'summary_score'
p23
I100
sS'results'
p24
(dp25
S'failed'
p26
(dp27
S'score'
p28
I50
sS'raw_score'
p29
L2164L
sS'display'
p30
S'2164'
p31
ssS'passed'
p32
(dp33
g28
I50
sg29
L0L
sg30
g17
ssssVMaxthon 0
p34
(dp35
S'summary_display'
p36
S'0%'
p37
sS'total_runs'
p38
L1L
sS'summary_score'
p39
I100
sS'results'
p40
(dp41
S'failed'
p42
(dp43
S'score'
p44
I50
sS'raw_score'
p45
L1816L
sS'display'
p46
S'1816'
p47
ssS'passed'
p48
(dp49
g44
I50
sg45
L0L
sg46
g17
ssssVSafari 1
p50
(dp51
S'summary_display'
p52
S'80.6%'
p53
sS'total_runs'
p54
L1L
sS'summary_score'
p55
F80.637707948244
sS'results'
p56
(dp57
S'failed'
p58
(dp59
S'score'
p60
I50
sS'raw_score'
p61
L419L
sS'display'
p62
S'419'
p63
ssS'passed'
p64
(dp65
g60
I50
sg61
L1745L
sg62
S'1745'
p66
ssssVMaxthon 2
p67
(dp68
S'summary_display'
p69
S'0%'
p70
sS'total_runs'
p71
I12
sS'summary_score'
p72
I100
sS'results'
p73
(dp74
S'failed'
p75
(dp76
S'score'
p77
I50
sS'raw_score'
p78
I1816
sS'display'
p79
S'1816'
p80
ssS'passed'
p81
(dp82
g77
I50
sg78
I0
sg79
g17
ssssVMaxthon 3
p83
(dp84
S'summary_display'
p85
S'99.3%'
p86
sS'total_runs'
p87
L6L
sS'summary_score'
p88
F99.260628465804061
sS'results'
p89
(dp90
S'failed'
p91
(dp92
S'score'
p93
I75
sS'raw_score'
p94
L16L
sS'display'
p95
S'16'
p96
ssS'passed'
p97
(dp98
g93
I95
sg94
L2148L
sg95
S'2148'
p99
ssssVShiira 0
p100
(dp101
S'summary_display'
p102
S'99.3%'
p103
sS'total_runs'
p104
L1L
sS'summary_score'
p105
F99.260628465804061
sS'results'
p106
(dp107
S'failed'
p108
(dp109
S'score'
p110
I75
sS'raw_score'
p111
L16L
sS'display'
p112
S'16'
p113
ssS'passed'
p114
(dp115
g110
I95
sg111
L2148L
sg112
S'2148'
p116
ssssVChrome Frame (IE 8) 4
p117
(dp118
S'summary_display'
p119
S'99.3%'
p120
sS'total_runs'
p121
L12L
sS'summary_score'
p122
F99.260628465804061
sS'results'
p123
(dp124
S'failed'
p125
(dp126
S'score'
p127
I75
sS'raw_score'
p128
L16L
sS'display'
p129
S'16'
p130
ssS'passed'
p131
(dp132
g127
I95
sg128
L2148L
sg129
S'2148'
p133
ssssVNetscape 9
p134
(dp135
S'summary_display'
p136
S'0%'
p137
sS'total_runs'
p138
L1L
sS'summary_score'
p139
I100
sS'results'
p140
(dp141
S'failed'
p142
(dp143
S'score'
p144
I50
sS'raw_score'
p145
L2164L
sS'display'
p146
S'2164'
p147
ssS'passed'
p148
(dp149
g144
I50
sg145
L0L
sg146
g17
ssssVOpera Mini 5
p150
(dp151
S'summary_display'
p152
S'97.5%'
p153
sS'total_runs'
p154
L4L
sS'summary_score'
p155
F97.504621072088725
sS'results'
p156
(dp157
S'failed'
p158
(dp159
S'score'
p160
I50
sS'raw_score'
p161
L54L
sS'display'
p162
S'54'
p163
ssS'passed'
p164
(dp165
g160
I95
sg161
L2110L
sg162
S'2110'
p166
ssssVFirefox (Namoroka) 3
p167
(dp168
S'summary_display'
p169
S'99.3%'
p170
sS'total_runs'
p171
L47L
sS'summary_score'
p172
F99.260628465804061
sS'results'
p173
(dp174
S'failed'
p175
(dp176
S'score'
p177
I75
sS'raw_score'
p178
L16L
sS'display'
p179
S'16'
p180
ssS'passed'
p181
(dp182
g177
I95
sg178
L2148L
sg179
S'2148'
p183
ssssVIE 8
p184
(dp185
S'summary_display'
p186
S'58.5%'
p187
sS'total_runs'
p188
I424
sS'summary_score'
p189
F58.456561922365992
sS'results'
p190
(dp191
S'failed'
p192
(dp193
S'score'
p194
I50
sS'raw_score'
p195
I899
sS'display'
p196
S'899'
p197
ssS'passed'
p198
(dp199
g194
I50
sg195
I1265
sg196
S'1265'
p200
ssssVLunascape 4
p201
(dp202
S'summary_display'
p203
S'97.8%'
p204
sS'total_runs'
p205
L1L
sS'summary_score'
p206
F97.781885397412196
sS'results'
p207
(dp208
S'failed'
p209
(dp210
S'score'
p211
I50
sS'raw_score'
p212
L48L
sS'display'
p213
S'48'
p214
ssS'passed'
p215
(dp216
g211
I95
sg212
L2116L
sg213
S'2116'
p217
ssssVOpera 10
p218
(dp219
S'summary_display'
p220
S'99.8%'
p221
sS'total_runs'
p222
I1230
sS'summary_score'
p223
F99.815157116451019
sS'results'
p224
(dp225
S'failed'
p226
(dp227
S'score'
p228
I85
sS'raw_score'
p229
I4
sS'display'
p230
S'4'
p231
ssS'passed'
p232
(dp233
g228
I95
sg229
I2160
sg230
S'2160'
p234
ssssVMaemo Browser 1
p235
(dp236
S'summary_display'
p237
S'99.3%'
p238
sS'total_runs'
p239
L1L
sS'summary_score'
p240
F99.260628465804061
sS'results'
p241
(dp242
S'failed'
p243
(dp244
S'score'
p245
I75
sS'raw_score'
p246
L16L
sS'display'
p247
S'16'
p248
ssS'passed'
p249
(dp250
g245
I95
sg246
L2148L
sg247
S'2148'
p251
ssssVMidori 0
p252
(dp253
S'summary_display'
p254
S'99.3%'
p255
sS'total_runs'
p256
L18L
sS'summary_score'
p257
F99.260628465804061
sS'results'
p258
(dp259
S'failed'
p260
(dp261
S'score'
p262
I75
sS'raw_score'
p263
L16L
sS'display'
p264
S'16'
p265
ssS'passed'
p266
(dp267
g262
I95
sg263
L2148L
sg264
S'2148'
p268
ssssVIE 6
p269
(dp270
S'summary_display'
p271
S'0%'
p272
sS'total_runs'
p273
I102
sS'summary_score'
p274
I100
sS'results'
p275
(dp276
S'failed'
p277
(dp278
S'score'
p279
I50
sS'raw_score'
p280
I1816
sS'display'
p281
S'1816'
p282
ssS'passed'
p283
(dp284
g279
I50
sg280
I0
sg281
g17
ssssVIE 7
p285
(dp286
S'summary_display'
p287
S'0%'
p288
sS'total_runs'
p289
I191
sS'summary_score'
p290
I100
sS'results'
p291
(dp292
S'failed'
p293
(dp294
S'score'
p295
I50
sS'raw_score'
p296
I1816
sS'display'
p297
S'1816'
p298
ssS'passed'
p299
(dp300
g295
I50
sg296
I0
sg297
g17
ssssVIE 4
p301
(dp302
S'summary_display'
p303
S'0%'
p304
sS'total_runs'
p305
L1L
sS'summary_score'
p306
I100
sS'results'
p307
(dp308
S'failed'
p309
(dp310
S'score'
p311
I50
sS'raw_score'
p312
L2164L
sS'display'
p313
S'2164'
p314
ssS'passed'
p315
(dp316
g311
I50
sg312
L0L
sg313
g17
ssssVIE 5
p317
(dp318
S'summary_display'
p319
S'0%'
p320
sS'total_runs'
p321
L1L
sS'summary_score'
p322
I100
sS'results'
p323
(dp324
S'failed'
p325
(dp326
S'score'
p327
I50
sS'raw_score'
p328
L1816L
sS'display'
p329
S'1816'
p330
ssS'passed'
p331
(dp332
g327
I50
sg328
L0L
sg329
g17
ssssVFirefox 2
p333
(dp334
S'summary_display'
p335
S'0%'
p336
sS'total_runs'
p337
L42L
sS'summary_score'
p338
I100
sS'results'
p339
(dp340
S'failed'
p341
(dp342
S'score'
p343
I50
sS'raw_score'
p344
L2164L
sS'display'
p345
S'2164'
p346
ssS'passed'
p347
(dp348
g343
I50
sg344
L0L
sg345
g17
ssssVFirefox 3
p349
(dp350
S'summary_display'
p351
S'99.3%'
p352
sS'total_runs'
p353
I4564
sS'summary_score'
p354
F99.260628465804061
sS'results'
p355
(dp356
S'failed'
p357
(dp358
S'score'
p359
I75
sS'raw_score'
p360
I16
sS'display'
p361
S'16'
p362
ssS'passed'
p363
(dp364
g359
I95
sg360
I2148
sg361
S'2148'
p365
ssssVChrome 4
p366
(dp367
S'summary_display'
p368
S'99.3%'
p369
sS'total_runs'
p370
I989
sS'summary_score'
p371
F99.260628465804061
sS'results'
p372
(dp373
S'failed'
p374
(dp375
S'score'
p376
I75
sS'raw_score'
p377
I16
sS'display'
p378
S'16'
p379
ssS'passed'
p380
(dp381
g376
I95
sg377
I2148
sg378
S'2148'
p382
ssssVFirefox 1
p383
(dp384
S'summary_display'
p385
S'0%'
p386
sS'total_runs'
p387
L6L
sS'summary_score'
p388
I100
sS'results'
p389
(dp390
S'failed'
p391
(dp392
S'score'
p393
I50
sS'raw_score'
p394
L2164L
sS'display'
p395
S'2164'
p396
ssS'passed'
p397
(dp398
g393
I50
sg394
L0L
sg395
g17
ssssVChrome 2
p399
(dp400
S'summary_display'
p401
S'99.3%'
p402
sS'total_runs'
p403
L204L
sS'summary_score'
p404
F99.260628465804061
sS'results'
p405
(dp406
S'failed'
p407
(dp408
S'score'
p409
I75
sS'raw_score'
p410
L16L
sS'display'
p411
S'16'
p412
ssS'passed'
p413
(dp414
g409
I95
sg410
L2148L
sg411
S'2148'
p415
ssssVChrome 3
p416
(dp417
S'summary_display'
p418
S'99.3%'
p419
sS'total_runs'
p420
I560
sS'summary_score'
p421
F99.260628465804061
sS'results'
p422
(dp423
S'failed'
p424
(dp425
S'score'
p426
I75
sS'raw_score'
p427
I16
sS'display'
p428
S'16'
p429
ssS'passed'
p430
(dp431
g426
I95
sg427
I2148
sg428
S'2148'
p432
ssssVChrome 0
p433
(dp434
S'summary_display'
p435
S'80.6%'
p436
sS'total_runs'
p437
L2L
sS'summary_score'
p438
F80.637707948244
sS'results'
p439
(dp440
S'failed'
p441
(dp442
S'score'
p443
I50
sS'raw_score'
p444
L419L
sS'display'
p445
S'419'
p446
ssS'passed'
p447
(dp448
g443
I50
sg444
L1745L
sg445
S'1745'
p449
ssssVChrome 1
p450
(dp451
S'summary_display'
p452
S'80.6%'
p453
sS'total_runs'
p454
L8L
sS'summary_score'
p455
F80.637707948244
sS'results'
p456
(dp457
S'failed'
p458
(dp459
S'score'
p460
I50
sS'raw_score'
p461
L419L
sS'display'
p462
S'419'
p463
ssS'passed'
p464
(dp465
g460
I50
sg461
L1745L
sg462
S'1745'
p466
ssssVNetNewsWire 3
p467
(dp468
S'summary_display'
p469
S'99.3%'
p470
sS'total_runs'
p471
L10L
sS'summary_score'
p472
F99.260628465804061
sS'results'
p473
(dp474
S'failed'
p475
(dp476
S'score'
p477
I75
sS'raw_score'
p478
L16L
sS'display'
p479
S'16'
p480
ssS'passed'
p481
(dp482
g477
I95
sg478
L2148L
sg479
S'2148'
p483
ssssVNokia 97
p484
(dp485
S'summary_display'
p486
S'0%'
p487
sS'total_runs'
p488
L2L
sS'summary_score'
p489
I100
sS'results'
p490
(dp491
S'failed'
p492
(dp493
S'score'
p494
I50
sS'raw_score'
p495
L2164L
sS'display'
p496
S'2164'
p497
ssS'passed'
p498
(dp499
g494
I50
sg495
L0L
sg496
g17
ssssVNetFront 3
p500
(dp501
S'summary_display'
p502
S'49.8%'
p503
sS'total_runs'
p504
L2L
sS'summary_score'
p505
F49.814471243042675
sS'results'
p506
(dp507
S'failed'
p508
(dp509
S'score'
p510
I50
sS'raw_score'
p511
L2164L
sS'display'
p512
S'2164'
p513
ssS'passed'
p514
(dp515
g510
I95
sg511
L2148L
sg512
S'2148'
p516
ssssVSafari 4
p517
(dp518
S'summary_display'
p519
S'99.3%'
p520
sS'total_runs'
p521
I960
sS'summary_score'
p522
F99.260628465804061
sS'results'
p523
(dp524
S'failed'
p525
(dp526
S'score'
p527
I75
sS'raw_score'
p528
I16
sS'display'
p529
S'16'
p530
ssS'passed'
p531
(dp532
g527
I95
sg528
I2148
sg529
S'2148'
p533
ssssVVodafone 1
p534
(dp535
S'summary_display'
p536
S'99.3%'
p537
sS'total_runs'
p538
L1L
sS'summary_score'
p539
F99.260628465804061
sS'results'
p540
(dp541
S'failed'
p542
(dp543
S'score'
p544
I75
sS'raw_score'
p545
L16L
sS'display'
p546
S'16'
p547
ssS'passed'
p548
(dp549
g544
I95
sg545
L2148L
sg546
S'2148'
p550
ssssViPhone 3
p551
(dp552
S'summary_display'
p553
S'97.8%'
p554
sS'total_runs'
p555
L113L
sS'summary_score'
p556
F97.781885397412196
sS'results'
p557
(dp558
S'failed'
p559
(dp560
S'score'
p561
I50
sS'raw_score'
p562
L48L
sS'display'
p563
S'48'
p564
ssS'passed'
p565
(dp566
g561
I95
sg562
L2116L
sg563
S'2116'
p567
ssssViPhone 2
p568
(dp569
S'summary_display'
p570
S'80.6%'
p571
sS'total_runs'
p572
L8L
sS'summary_score'
p573
F80.637707948244
sS'results'
p574
(dp575
S'failed'
p576
(dp577
S'score'
p578
I50
sS'raw_score'
p579
L419L
sS'display'
p580
S'419'
p581
ssS'passed'
p582
(dp583
g578
I50
sg579
L1745L
sg580
S'1745'
p584
ssssVEpiphany 2
p585
(dp586
S'summary_display'
p587
S'0%'
p588
sS'total_runs'
p589
L7L
sS'summary_score'
p590
I100
sS'results'
p591
(dp592
S'failed'
p593
(dp594
S'score'
p595
I50
sS'raw_score'
p596
L2164L
sS'display'
p597
S'2164'
p598
ssS'passed'
p599
(dp600
g595
I50
sg596
L0L
sg597
g17
ssssVChrome Frame (IE 6) 4
p601
(dp602
S'summary_display'
p603
S'99.3%'
p604
sS'total_runs'
p605
L2L
sS'summary_score'
p606
F99.260628465804061
sS'results'
p607
(dp608
S'failed'
p609
(dp610
S'score'
p611
I75
sS'raw_score'
p612
L16L
sS'display'
p613
S'16'
p614
ssS'passed'
p615
(dp616
g611
I95
sg612
L2148L
sg613
S'2148'
p617
ssssVAvant 1
p618
(dp619
S'summary_display'
p620
S'0%'
p621
sS'total_runs'
p622
L10L
sS'summary_score'
p623
I100
sS'results'
p624
(dp625
S'failed'
p626
(dp627
S'score'
p628
I50
sS'raw_score'
p629
L1816L
sS'display'
p630
S'1816'
p631
ssS'passed'
p632
(dp633
g628
I50
sg629
L0L
sg630
g17
ssssVIron 2
p634
(dp635
S'summary_display'
p636
S'99.3%'
p637
sS'total_runs'
p638
L7L
sS'summary_score'
p639
F99.260628465804061
sS'results'
p640
(dp641
S'failed'
p642
(dp643
S'score'
p644
I75
sS'raw_score'
p645
L16L
sS'display'
p646
S'16'
p647
ssS'passed'
p648
(dp649
g644
I95
sg645
L2148L
sg646
S'2148'
p650
ssssVIron 3
p651
(dp652
S'summary_display'
p653
S'99.3%'
p654
sS'total_runs'
p655
L53L
sS'summary_score'
p656
F99.260628465804061
sS'results'
p657
(dp658
S'failed'
p659
(dp660
S'score'
p661
I75
sS'raw_score'
p662
L16L
sS'display'
p663
S'16'
p664
ssS'passed'
p665
(dp666
g661
I95
sg662
L2148L
sg663
S'2148'
p667
ssssVSwiftfox 3
p668
(dp669
S'summary_display'
p670
S'99.3%'
p671
sS'total_runs'
p672
L10L
sS'summary_score'
p673
F99.260628465804061
sS'results'
p674
(dp675
S'failed'
p676
(dp677
S'score'
p678
I75
sS'raw_score'
p679
L16L
sS'display'
p680
S'16'
p681
ssS'passed'
p682
(dp683
g678
I95
sg679
L2148L
sg680
S'2148'
p684
ssssVOmniWeb 622
p685
(dp686
S'summary_display'
p687
S'99.3%'
p688
sS'total_runs'
p689
L5L
sS'summary_score'
p690
F99.260628465804061
sS'results'
p691
(dp692
S'failed'
p693
(dp694
S'score'
p695
I75
sS'raw_score'
p696
L16L
sS'display'
p697
S'16'
p698
ssS'passed'
p699
(dp700
g695
I95
sg696
L2148L
sg697
S'2148'
p701
ssssVIceweasel 3
p702
(dp703
S'summary_display'
p704
S'0%'
p705
sS'total_runs'
p706
L48L
sS'summary_score'
p707
I100
sS'results'
p708
(dp709
S'failed'
p710
(dp711
S'score'
p712
I50
sS'raw_score'
p713
L2164L
sS'display'
p714
S'2164'
p715
ssS'passed'
p716
(dp717
g712
I50
sg713
L0L
sg714
g17
ssssVFluid 0
p718
(dp719
S'summary_display'
p720
S'99.3%'
p721
sS'total_runs'
p722
L1L
sS'summary_score'
p723
F99.260628465804061
sS'results'
p724
(dp725
S'failed'
p726
(dp727
S'score'
p728
I75
sS'raw_score'
p729
L16L
sS'display'
p730
S'16'
p731
ssS'passed'
p732
(dp733
g728
I95
sg729
L2148L
sg730
S'2148'
p734
ssssS'total_runs'
p735
L10533L
sVSeaMonkey 2
p736
(dp737
S'summary_display'
p738
S'99.3%'
p739
sS'total_runs'
p740
L24L
sS'summary_score'
p741
F99.260628465804061
sS'results'
p742
(dp743
S'failed'
p744
(dp745
S'score'
p746
I75
sS'raw_score'
p747
L16L
sS'display'
p748
S'16'
p749
ssS'passed'
p750
(dp751
g746
I95
sg747
L2148L
sg748
S'2148'
p752
ssssVWii 9
p753
(dp754
S'summary_display'
p755
S'0%'
p756
sS'total_runs'
p757
I2
sS'summary_score'
p758
I100
sS'results'
p759
(dp760
S'failed'
p761
(dp762
S'score'
p763
I50
sS'raw_score'
p764
I2164
sS'display'
p765
S'2164'
p766
ssS'passed'
p767
(dp768
g763
I50
sg764
I0
sg765
g17
ssssVSafari 3
p769
(dp770
S'summary_display'
p771
S'80.6%'
p772
sS'total_runs'
p773
L36L
sS'summary_score'
p774
F80.637707948244
sS'results'
p775
(dp776
S'failed'
p777
(dp778
S'score'
p779
I50
sS'raw_score'
p780
L419L
sS'display'
p781
S'419'
p782
ssS'passed'
p783
(dp784
g779
I50
sg780
L1745L
sg781
S'1745'
p785
ssssVFlock 1
p786
(dp787
S'summary_display'
p788
S'0%'
p789
sS'total_runs'
p790
L2L
sS'summary_score'
p791
I100
sS'results'
p792
(dp793
S'failed'
p794
(dp795
S'score'
p796
I50
sS'raw_score'
p797
L2164L
sS'display'
p798
S'2164'
p799
ssS'passed'
p800
(dp801
g796
I50
sg797
L0L
sg798
g17
ssssVFirefox (Shiretoko) 3
p802
(dp803
S'summary_display'
p804
S'99.3%'
p805
sS'total_runs'
p806
L171L
sS'summary_score'
p807
F99.260628465804061
sS'results'
p808
(dp809
S'failed'
p810
(dp811
S'score'
p812
I75
sS'raw_score'
p813
L16L
sS'display'
p814
S'16'
p815
ssS'passed'
p816
(dp817
g812
I95
sg813
L2148L
sg814
S'2148'
p818
ssssVFennec 1
p819
(dp820
S'summary_display'
p821
S'99.3%'
p822
sS'total_runs'
p823
L6L
sS'summary_score'
p824
F99.260628465804061
sS'results'
p825
(dp826
S'failed'
p827
(dp828
S'score'
p829
I75
sS'raw_score'
p830
L16L
sS'display'
p831
S'16'
p832
ssS'passed'
p833
(dp834
g829
I95
sg830
L2148L
sg831
S'2148'
p835
ssssVNokia 5800
p836
(dp837
S'summary_display'
p838
S'0%'
p839
sS'total_runs'
p840
L5L
sS'summary_score'
p841
I100
sS'results'
p842
(dp843
S'failed'
p844
(dp845
S'score'
p846
I50
sS'raw_score'
p847
L2164L
sS'display'
p848
S'2164'
p849
ssS'passed'
p850
(dp851
g846
I50
sg847
L0L
sg848
g17
ssssVFlock 2
p852
(dp853
S'summary_display'
p854
S'0%'
p855
sS'total_runs'
p856
L6L
sS'summary_score'
p857
I100
sS'results'
p858
(dp859
S'failed'
p860
(dp861
S'score'
p862
I50
sS'raw_score'
p863
L2164L
sS'display'
p864
S'2164'
p865
ssS'passed'
p866
(dp867
g862
I50
sg863
L0L
sg864
g17
ssssVKonqueror 3
p868
(dp869
S'summary_display'
p870
S'0%'
p871
sS'total_runs'
p872
L10L
sS'summary_score'
p873
I100
sS'results'
p874
(dp875
S'failed'
p876
(dp877
S'score'
p878
I50
sS'raw_score'
p879
L2164L
sS'display'
p880
S'2164'
p881
ssS'passed'
p882
(dp883
g878
I50
sg879
L0L
sg880
g17
ssssVFirefox (Minefield) 3
p884
(dp885
S'summary_display'
p886
S'100.0%'
p887
sS'total_runs'
p888
I184
sS'summary_score'
p889
F100.0
sS'results'
p890
(dp891
S'failed'
p892
(dp893
S'score'
p894
I95
sS'raw_score'
p895
I0
sS'display'
p896
g17
ssS'passed'
p897
(dp898
g894
I95
sg895
I2164
sg896
S'2164'
p899
ssssViCab 4
p900
(dp901
S'summary_display'
p902
S'99.3%'
p903
sS'total_runs'
p904
L1L
sS'summary_score'
p905
F99.260628465804061
sS'results'
p906
(dp907
S'failed'
p908
(dp909
S'score'
p910
I75
sS'raw_score'
p911
L16L
sS'display'
p912
S'16'
p913
ssS'passed'
p914
(dp915
g910
I95
sg911
L2148L
sg912
S'2148'
p916
ssssVSleipnir 2
p917
(dp918
S'summary_display'
p919
S'58.5%'
p920
sS'total_runs'
p921
L15L
sS'summary_score'
p922
F58.456561922365992
sS'results'
p923
(dp924
S'failed'
p925
(dp926
S'score'
p927
I50
sS'raw_score'
p928
L899L
sS'display'
p929
S'899'
p930
ssS'passed'
p931
(dp932
g927
I50
sg928
L1265L
sg929
S'1265'
p933
ssssVKonqueror 4
p934
(dp935
S'summary_display'
p936
S'0%'
p937
sS'total_runs'
p938
I52
sS'summary_score'
p939
I100
sS'results'
p940
(dp941
S'failed'
p942
(dp943
S'score'
p944
I50
sS'raw_score'
p945
I2164
sS'display'
p946
S'2164'
p947
ssS'passed'
p948
(dp949
g944
I50
sg945
I0
sg946
g17
ssssVCamino 2
p950
(dp951
S'summary_display'
p952
S'0%'
p953
sS'total_runs'
p954
L6L
sS'summary_score'
p955
I100
sS'results'
p956
(dp957
S'failed'
p958
(dp959
S'score'
p960
I50
sS'raw_score'
p961
L2164L
sS'display'
p962
S'2164'
p963
ssS'passed'
p964
(dp965
g960
I50
sg961
L0L
sg962
g17
ssssVCamino 1
p966
(dp967
S'summary_display'
p968
S'0%'
p969
sS'total_runs'
p970
L17L
sS'summary_score'
p971
I100
sS'results'
p972
(dp973
S'failed'
p974
(dp975
S'score'
p976
I50
sS'raw_score'
p977
L2164L
sS'display'
p978
S'2164'
p979
ssS'passed'
p980
(dp981
g976
I50
sg977
L0L
sg978
g17
ssssVJasmine 1
p982
(dp983
S'summary_display'
p984
S'49.8%'
p985
sS'total_runs'
p986
L2L
sS'summary_score'
p987
F49.814471243042675
sS'results'
p988
(dp989
S'failed'
p990
(dp991
S'score'
p992
I50
sS'raw_score'
p993
L2164L
sS'display'
p994
S'2164'
p995
ssS'passed'
p996
(dp997
g992
I95
sg993
L2148L
sg994
S'2148'
p998
ssssVArora 0
p999
(dp1000
S'summary_display'
p1001
S'97.8%'
p1002
sS'total_runs'
p1003
L24L
sS'summary_score'
p1004
F97.781885397412196
sS'results'
p1005
(dp1006
S'failed'
p1007
(dp1008
S'score'
p1009
I50
sS'raw_score'
p1010
L48L
sS'display'
p1011
S'48'
p1012
ssS'passed'
p1013
(dp1014
g1009
I95
sg1010
L2116L
sg1011
S'2116'
p1015
ssssVAndroid 1
p1016
(dp1017
S'summary_display'
p1018
S'97.8%'
p1019
sS'total_runs'
p1020
I44
sS'summary_score'
p1021
F97.781885397412196
sS'results'
p1022
(dp1023
S'failed'
p1024
(dp1025
S'score'
p1026
I50
sS'raw_score'
p1027
I48
sS'display'
p1028
S'48'
p1029
ssS'passed'
p1030
(dp1031
g1026
I95
sg1027
I2116
sg1028
S'2116'
p1032
ssssVAndroid 2
p1033
(dp1034
S'summary_display'
p1035
S'99.3%'
p1036
sS'total_runs'
p1037
I12
sS'summary_score'
p1038
F99.260628465804061
sS'results'
p1039
(dp1040
S'failed'
p1041
(dp1042
S'score'
p1043
I75
sS'raw_score'
p1044
I16
sS'display'
p1045
S'16'
p1046
ssS'passed'
p1047
(dp1048
g1043
I95
sg1044
I2148
sg1045
S'2148'
p1049
ssssVIron 4
p1050
(dp1051
S'summary_display'
p1052
S'99.3%'
p1053
sS'total_runs'
p1054
I11
sS'summary_score'
p1055
F99.260628465804061
sS'results'
p1056
(dp1057
S'failed'
p1058
(dp1059
S'score'
p1060
I75
sS'raw_score'
p1061
I16
sS'display'
p1062
S'16'
p1063
ssS'passed'
p1064
(dp1065
g1060
I95
sg1061
I2148
sg1062
S'2148'
p1066
ssssVPalm Pre 1
p1067
(dp1068
S'summary_display'
p1069
S'99.3%'
p1070
sS'total_runs'
p1071
I9
sS'summary_score'
p1072
F99.260628465804061
sS'results'
p1073
(dp1074
S'failed'
p1075
(dp1076
S'score'
p1077
I75
sS'raw_score'
p1078
I16
sS'display'
p1079
S'16'
p1080
ssS'passed'
p1081
(dp1082
g1077
I95
sg1078
I2148
sg1079
S'2148'
p1083
ssssVKazehakase 0
p1084
(dp1085
S'summary_display'
p1086
S'0%'
p1087
sS'total_runs'
p1088
L1L
sS'summary_score'
p1089
I100
sS'results'
p1090
(dp1091
S'failed'
p1092
(dp1093
S'score'
p1094
I50
sS'raw_score'
p1095
L2164L
sS'display'
p1096
S'2164'
p1097
ssS'passed'
p1098
(dp1099
g1094
I50
sg1095
L0L
sg1096
g17
ssssVVienna 2
p1100
(dp1101
S'summary_display'
p1102
S'99.3%'
p1103
sS'total_runs'
p1104
L2L
sS'summary_score'
p1105
F99.260628465804061
sS'results'
p1106
(dp1107
S'failed'
p1108
(dp1109
S'score'
p1110
I75
sS'raw_score'
p1111
L16L
sS'display'
p1112
S'16'
p1113
ssS'passed'
p1114
(dp1115
g1110
I95
sg1111
L2148L
sg1112
S'2148'
p1116
ssssVStainless 0
p1117
(dp1118
S'summary_display'
p1119
S'99.3%'
p1120
sS'total_runs'
p1121
L5L
sS'summary_score'
p1122
F99.260628465804061
sS'results'
p1123
(dp1124
S'failed'
p1125
(dp1126
S'score'
p1127
I75
sS'raw_score'
p1128
L16L
sS'display'
p1129
S'16'
p1130
ssS'passed'
p1131
(dp1132
g1127
I95
sg1128
L2148L
sg1129
S'2148'
p1133
ssssVOpera Mini 4
p1134
(dp1135
S'summary_display'
p1136
S'97.5%'
p1137
sS'total_runs'
p1138
L22L
sS'summary_score'
p1139
F97.504621072088725
sS'results'
p1140
(dp1141
S'failed'
p1142
(dp1143
S'score'
p1144
I50
sS'raw_score'
p1145
L54L
sS'display'
p1146
S'54'
p1147
ssS'passed'
p1148
(dp1149
g1144
I95
sg1145
L2110L
sg1146
S'2110'
p1150
ssssVLunascape 6
p1151
(dp1152
S'summary_display'
p1153
S'99.3%'
p1154
sS'total_runs'
p1155
L10L
sS'summary_score'
p1156
F99.260628465804061
sS'results'
p1157
(dp1158
S'failed'
p1159
(dp1160
S'score'
p1161
I75
sS'raw_score'
p1162
L16L
sS'display'
p1163
S'16'
p1164
ssS'passed'
p1165
(dp1166
g1161
I95
sg1162
L2148L
sg1163
S'2148'
p1167
ssssVChrome Frame (IE 7) 4
p1168
(dp1169
S'summary_display'
p1170
S'99.3%'
p1171
sS'total_runs'
p1172
L5L
sS'summary_score'
p1173
F99.260628465804061
sS'results'
p1174
(dp1175
S'failed'
p1176
(dp1177
S'score'
p1178
I75
sS'raw_score'
p1179
L16L
sS'display'
p1180
S'16'
p1181
ssS'passed'
p1182
(dp1183
g1178
I95
sg1179
L2148L
sg1180
S'2148'
p1184
ssssVOpera 8
p1185
(dp1186
S'summary_display'
p1187
S'29.2%'
p1188
sS'total_runs'
p1189
L1L
sS'summary_score'
p1190
F29.166666666666668
sS'results'
p1191
(dp1192
S'failed'
p1193
(dp1194
S'score'
p1195
I50
sS'raw_score'
p1196
L561L
sS'display'
p1197
S'561'
p1198
ssS'passed'
p1199
(dp1200
g1195
I50
sg1196
L231L
sg1197
S'231'
p1201
ssssVOpera 9
p1202
(dp1203
S'summary_display'
p1204
S'0%'
p1205
sS'total_runs'
p1206
L57L
sS'summary_score'
p1207
I100
sS'results'
p1208
(dp1209
S'failed'
p1210
(dp1211
S'score'
p1212
I50
sS'raw_score'
p1213
L2164L
sS'display'
p1214
S'2164'
p1215
ssS'passed'
p1216
(dp1217
g1212
I50
sg1213
L0L
sg1214
g17
ssssVLunascape 5
p1218
(dp1219
S'summary_display'
p1220
S'99.3%'
p1221
sS'total_runs'
p1222
L8L
sS'summary_score'
p1223
F99.260628465804061
sS'results'
p1224
(dp1225
S'failed'
p1226
(dp1227
S'score'
p1228
I75
sS'raw_score'
p1229
L16L
sS'display'
p1230
S'16'
p1231
ssS'passed'
p1232
(dp1233
g1228
I95
sg1229
L2148L
sg1230
S'2148'
p1234
ssssVGranParadiso 3
p1235
(dp1236
S'summary_display'
p1237
S'0%'
p1238
sS'total_runs'
p1239
L1L
sS'summary_score'
p1240
I100
sS'results'
p1241
(dp1242
S'failed'
p1243
(dp1244
S'score'
p1245
I50
sS'raw_score'
p1246
L2164L
sS'display'
p1247
S'2164'
p1248
ssS'passed'
p1249
(dp1250
g1245
I50
sg1246
L0L
sg1247
g17
ssssVOther
p1251
(dp1252
S'summary_display'
p1253
S'99.3%'
p1254
sS'total_runs'
p1255
L65L
sS'summary_score'
p1256
F99.260628465804061
sS'results'
p1257
(dp1258
S'failed'
p1259
(dp1260
S'score'
p1261
I75
sS'raw_score'
p1262
L16L
sS'display'
p1263
S'16'
p1264
ssS'passed'
p1265
(dp1266
g1261
I95
sg1262
L2148L
sg1263
S'2148'
p1267
ssssVGaleon 2
p1268
(dp1269
S'summary_display'
p1270
S'0%'
p1271
sS'total_runs'
p1272
L2L
sS'summary_score'
p1273
I100
sS'results'
p1274
(dp1275
S'failed'
p1276
(dp1277
S'score'
p1278
I50
sS'raw_score'
p1279
L2164L
sS'display'
p1280
S'2164'
p1281
ssS'passed'
p1282
(dp1283
g1278
I50
sg1279
L0L
sg1280
g17
ssssVMicroB 0
p1284
(dp1285
S'summary_display'
p1286
S'0%'
p1287
sS'total_runs'
p1288
L1L
sS'summary_score'
p1289
I100
sS'results'
p1290
(dp1291
S'failed'
p1292
(dp1293
S'score'
p1294
I50
sS'raw_score'
p1295
L2164L
sS'display'
p1296
S'2164'
p1297
ssS'passed'
p1298
(dp1299
g1294
I50
sg1295
L0L
sg1296
g17
ssssVQtWeb 3
p1300
(dp1301
S'summary_display'
p1302
S'97.8%'
p1303
sS'total_runs'
p1304
L1L
sS'summary_score'
p1305
F97.781885397412196
sS'results'
p1306
(dp1307
S'failed'
p1308
(dp1309
S'score'
p1310
I50
sS'raw_score'
p1311
L48L
sS'display'
p1312
S'48'
p1313
ssS'passed'
p1314
(dp1315
g1310
I95
sg1311
L2116L
sg1312
S'2116'
p1316
ssssVSeaMonkey 1
p1317
(dp1318
S'summary_display'
p1319
S'0%'
p1320
sS'total_runs'
p1321
I10
sS'summary_score'
p1322
I100
sS'results'
p1323
(dp1324
S'failed'
p1325
(dp1326
S'score'
p1327
I50
sS'raw_score'
p1328
I2164
sS'display'
p1329
S'2164'
p1330
ssS'passed'
p1331
(dp1332
g1327
I50
sg1328
I0
sg1329
g17
ssssVIceweasel 2
p1333
(dp1334
S'summary_display'
p1335
S'0%'
p1336
sS'total_runs'
p1337
L1L
sS'summary_score'
p1338
I100
sS'results'
p1339
(dp1340
S'failed'
p1341
(dp1342
S'score'
p1343
I50
sS'raw_score'
p1344
L2164L
sS'display'
p1345
S'2164'
p1346
ssS'passed'
p1347
(dp1348
g1343
I50
sg1344
L0L
sg1345
g17
ssssVUzbl 0
p1349
(dp1350
S'summary_display'
p1351
S'99.3%'
p1352
sS'total_runs'
p1353
L25L
sS'summary_score'
p1354
F99.260628465804061
sS'results'
p1355
(dp1356
S'failed'
p1357
(dp1358
S'score'
p1359
I75
sS'raw_score'
p1360
L16L
sS'display'
p1361
S'16'
p1362
ssS'passed'
p1363
(dp1364
g1359
I95
sg1360
L2148L
sg1361
S'2148'
p1365
ssss. | apache-2.0 | 6,179,444,714,391,151,000 | 6.963126 | 26 | 0.761023 | false |
Manojkumar91/odoo_inresto | addons/account/report/account_report.py | 8 | 12764 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from dateutil.relativedelta import relativedelta
from openerp import tools
from openerp.osv import fields,osv
def _code_get(self, cr, uid, context=None):
acc_type_obj = self.pool.get('account.account.type')
ids = acc_type_obj.search(cr, uid, [])
res = acc_type_obj.read(cr, uid, ids, ['code', 'name'], context)
return [(r['code'], r['name']) for r in res]
class report_account_receivable(osv.osv):
_name = "report.account.receivable"
_description = "Receivable accounts"
_auto = False
_columns = {
'name': fields.char('Week of Year', size=7, readonly=True),
'type': fields.selection(_code_get, 'Account Type', required=True),
'balance':fields.float('Balance', readonly=True),
'debit':fields.float('Debit', readonly=True),
'credit':fields.float('Credit', readonly=True),
}
_order = 'name desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_account_receivable')
cr.execute("""
create or replace view report_account_receivable as (
select
min(l.id) as id,
to_char(date,'YYYY:IW') as name,
sum(l.debit-l.credit) as balance,
sum(l.debit) as debit,
sum(l.credit) as credit,
a.type
from
account_move_line l
left join
account_account a on (l.account_id=a.id)
where
l.state <> 'draft'
group by
to_char(date,'YYYY:IW'), a.type
)""")
#a.type in ('receivable','payable')
class temp_range(osv.osv):
_name = 'temp.range'
_description = 'A Temporary table used for Dashboard view'
_columns = {
'name': fields.char('Range')
}
class report_aged_receivable(osv.osv):
_name = "report.aged.receivable"
_description = "Aged Receivable Till Today"
_auto = False
def __init__(self, pool, cr):
super(report_aged_receivable, self).__init__(pool, cr)
self.called = False
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" To call the init() method timely
"""
if context is None:context = {}
if not self.called:
self._init(cr, user)
self.called = True # To make sure that init doesn't get called multiple times
res = super(report_aged_receivable, self).fields_view_get(cr, user, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
return res
def _calc_bal(self, cr, uid, ids, name, args, context=None):
res = {}
for period in self.read(cr, uid, ids, ['name'], context=context):
date1,date2 = period['name'].split(' to ')
cr.execute("SELECT SUM(credit-debit) FROM account_move_line AS line, account_account as ac \
WHERE (line.account_id=ac.id) AND ac.type='receivable' \
AND (COALESCE(line.date,date) BETWEEN %s AND %s) \
AND (reconcile_id IS NULL) AND ac.active",(str(date2),str(date1),))
amount = cr.fetchone()
amount = amount[0] or 0.00
res[period['id']] = amount
return res
_columns = {
'name': fields.char('Month Range', size=24, readonly=True),
'balance': fields.function(_calc_bal, string='Balance', readonly=True),
}
def init(self, cr):
return self._init(cr, 1)
def _init(self, cr, uid):
""" This view will be used in dashboard
The reason writing this code here is, we need to check date range from today to first date of fiscal year.
"""
pool_obj_fy = self.pool['account.fiscalyear']
current_date = datetime.date.today()
fy_id = pool_obj_fy.find(cr, uid, exception=False)
names = []
def add(names, start_on, stop_on):
names.append(start_on.strftime("%Y-%m-%d") + ' to ' + stop_on.strftime('%Y-%m-%d'))
return names
if fy_id:
fiscal_year = pool_obj_fy.browse(cr, uid, fy_id)
fy_start_date = datetime.datetime.strptime(fiscal_year.date_start, '%Y-%m-%d').date()
last_month_date = current_date - relativedelta(months=1)
while (last_month_date > fy_start_date):
add(names, current_date, last_month_date)
current_date = last_month_date - relativedelta(days=1)
last_month_date = current_date - relativedelta(months=1)
add(names, current_date, fy_start_date)
cr.execute('delete from temp_range')
for name in names:
self.pool['temp.range'].create(cr, uid, {'name':name})
cr.execute("""
create or replace view report_aged_receivable as (
select id,name from temp_range
)""")
class report_invoice_created(osv.osv):
_name = "report.invoice.created"
_description = "Report of Invoices Created within Last 15 days"
_auto = False
_columns = {
'name': fields.char('Description', readonly=True),
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True),
'number': fields.char('Invoice Number', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'amount_untaxed': fields.float('Untaxed', readonly=True),
'amount_total': fields.float('Total', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'date_invoice': fields.date('Invoice Date', readonly=True),
'date_due': fields.date('Due Date', readonly=True),
'residual': fields.float('Residual', readonly=True),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Done'),
('cancel','Cancelled')
],'Status', readonly=True),
'origin': fields.char('Source Document', readonly=True, help="Reference of the document that generated this invoice report."),
'create_date': fields.datetime('Create Date', readonly=True)
}
_order = 'create_date'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_invoice_created')
cr.execute("""create or replace view report_invoice_created as (
select
inv.id as id, inv.name as name, inv.type as type,
inv.number as number, inv.partner_id as partner_id,
inv.amount_untaxed as amount_untaxed,
inv.amount_total as amount_total, inv.currency_id as currency_id,
inv.date_invoice as date_invoice, inv.date_due as date_due,
inv.residual as residual, inv.state as state,
inv.origin as origin, inv.create_date as create_date
from
account_invoice inv
where
(to_date(to_char(inv.create_date, 'YYYY-MM-dd'),'YYYY-MM-dd') <= CURRENT_DATE)
AND
(to_date(to_char(inv.create_date, 'YYYY-MM-dd'),'YYYY-MM-dd') > (CURRENT_DATE-15))
)""")
class report_account_type_sales(osv.osv):
_name = "report.account_type.sales"
_description = "Report of the Sales by Account Type"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'period_id': fields.many2one('account.period', 'Force Period', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'quantity': fields.float('Quantity', readonly=True),
'user_type': fields.many2one('account.account.type', 'Account Type', readonly=True),
'amount_total': fields.float('Total', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
}
_order = 'name desc,amount_total desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_account_type_sales')
cr.execute("""create or replace view report_account_type_sales as (
select
min(inv_line.id) as id,
to_char(inv.date_invoice, 'YYYY') as name,
to_char(inv.date_invoice,'MM') as month,
sum(inv_line.price_subtotal) as amount_total,
inv.currency_id as currency_id,
inv.period_id,
inv_line.product_id,
sum(inv_line.quantity) as quantity,
account.user_type
from
account_invoice_line inv_line
inner join account_invoice inv on inv.id = inv_line.invoice_id
inner join account_account account on account.id = inv_line.account_id
where
inv.state in ('open','paid')
group by
to_char(inv.date_invoice, 'YYYY'),to_char(inv.date_invoice,'MM'),inv.currency_id, inv.period_id, inv_line.product_id, account.user_type
)""")
class report_account_sales(osv.osv):
_name = "report.account.sales"
_description = "Report of the Sales by Account"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True, select=True),
'period_id': fields.many2one('account.period', 'Force Period', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'quantity': fields.float('Quantity', readonly=True),
'account_id': fields.many2one('account.account', 'Account', readonly=True),
'amount_total': fields.float('Total', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
}
_order = 'name desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_account_sales')
cr.execute("""create or replace view report_account_sales as (
select
min(inv_line.id) as id,
to_char(inv.date_invoice, 'YYYY') as name,
to_char(inv.date_invoice,'MM') as month,
sum(inv_line.price_subtotal) as amount_total,
inv.currency_id as currency_id,
inv.period_id,
inv_line.product_id,
sum(inv_line.quantity) as quantity,
account.id as account_id
from
account_invoice_line inv_line
inner join account_invoice inv on inv.id = inv_line.invoice_id
inner join account_account account on account.id = inv_line.account_id
where
inv.state in ('open','paid')
group by
to_char(inv.date_invoice, 'YYYY'),to_char(inv.date_invoice,'MM'),inv.currency_id, inv.period_id, inv_line.product_id, account.id
)""")
| agpl-3.0 | 6,816,886,781,557,173,000 | 43.319444 | 167 | 0.56816 | false |
bzennn/blog_flask | python/lib/python3.5/site-packages/olefile/olefile.py | 29 | 111647 | """
olefile (formerly OleFileIO_PL)
Module to read/write Microsoft OLE2 files (also called Structured Storage or
Microsoft Compound Document File Format), such as Microsoft Office 97-2003
documents, Image Composer and FlashPix files, Outlook messages, ...
This version is compatible with Python 2.6+ and 3.x
Project website: https://www.decalage.info/olefile
olefile is copyright (c) 2005-2017 Philippe Lagadec
(https://www.decalage.info)
olefile is based on the OleFileIO module from the PIL library v1.1.7
See: http://www.pythonware.com/products/pil/index.htm
and http://svn.effbot.org/public/tags/pil-1.1.7/PIL/OleFileIO.py
The Python Imaging Library (PIL) is
Copyright (c) 1997-2009 by Secret Labs AB
Copyright (c) 1995-2009 by Fredrik Lundh
See source code and LICENSE.txt for information on usage and redistribution.
"""
# Since OleFileIO_PL v0.30, only Python 2.6+ and 3.x is supported
# This import enables print() as a function rather than a keyword
# (main requirement to be compatible with Python 3.x)
# The comment on the line below should be printed on Python 2.5 or older:
from __future__ import print_function # This version of olefile requires Python 2.6+ or 3.x.
#--- LICENSE ------------------------------------------------------------------
# olefile (formerly OleFileIO_PL) is copyright (c) 2005-2017 Philippe Lagadec
# (https://www.decalage.info)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------
# PIL License:
#
# olefile is based on source code from the OleFileIO module of the Python
# Imaging Library (PIL) published by Fredrik Lundh under the following license:
# The Python Imaging Library (PIL) is
# Copyright (c) 1997-2009 by Secret Labs AB
# Copyright (c) 1995-2009 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its associated
# documentation, you agree that you have read, understood, and will comply with
# the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and its
# associated documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and that both
# that copyright notice and this permission notice appear in supporting
# documentation, and that the name of Secret Labs AB or the author(s) not be used
# in advertising or publicity pertaining to distribution of the software
# without specific, written prior permission.
#
# SECRET LABS AB AND THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
# IN NO EVENT SHALL SECRET LABS AB OR THE AUTHORS BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#-----------------------------------------------------------------------------
# CHANGELOG: (only olefile/OleFileIO_PL changes compared to PIL 1.1.6)
# 2005-05-11 v0.10 PL: - a few fixes for Python 2.4 compatibility
# (all changes flagged with [PL])
# 2006-02-22 v0.11 PL: - a few fixes for some Office 2003 documents which raise
# exceptions in OleStream.__init__()
# 2006-06-09 v0.12 PL: - fixes for files above 6.8MB (DIFAT in loadfat)
# - added some constants
# - added header values checks
# - added some docstrings
# - getsect: bugfix in case sectors >512 bytes
# - getsect: added conformity checks
# - DEBUG_MODE constant to activate debug display
# 2007-09-04 v0.13 PL: - improved/translated (lots of) comments
# - updated license
# - converted tabs to 4 spaces
# 2007-11-19 v0.14 PL: - added OleFileIO._raise_defect() to adapt sensitivity
# - improved _unicode() to use Python 2.x unicode support
# - fixed bug in OleDirectoryEntry
# 2007-11-25 v0.15 PL: - added safety checks to detect FAT loops
# - fixed OleStream which didn't check stream size
# - added/improved many docstrings and comments
# - moved helper functions _unicode and _clsid out of
# OleFileIO class
# - improved OleFileIO._find() to add Unix path syntax
# - OleFileIO._find() is now case-insensitive
# - added get_type() and get_rootentry_name()
# - rewritten loaddirectory and OleDirectoryEntry
# 2007-11-27 v0.16 PL: - added OleDirectoryEntry.kids_dict
# - added detection of duplicate filenames in storages
# - added detection of duplicate references to streams
# - added get_size() and exists() to OleDirectoryEntry
# - added isOleFile to check header before parsing
# - added __all__ list to control public keywords in pydoc
# 2007-12-04 v0.17 PL: - added _load_direntry to fix a bug in loaddirectory
# - improved _unicode(), added workarounds for Python <2.3
# - added set_debug_mode and -d option to set debug mode
# - fixed bugs in OleFileIO.open and OleDirectoryEntry
# - added safety check in main for large or binary
# properties
# - allow size>0 for storages for some implementations
# 2007-12-05 v0.18 PL: - fixed several bugs in handling of FAT, MiniFAT and
# streams
# - added option '-c' in main to check all streams
# 2009-12-10 v0.19 PL: - bugfix for 32 bit arrays on 64 bits platforms
# (thanks to Ben G. and Martijn for reporting the bug)
# 2009-12-11 v0.20 PL: - bugfix in OleFileIO.open when filename is not plain str
# 2010-01-22 v0.21 PL: - added support for big-endian CPUs such as PowerPC Macs
# 2012-02-16 v0.22 PL: - fixed bug in getproperties, patch by chuckleberryfinn
# (https://github.com/decalage2/olefile/issues/7)
# - added close method to OleFileIO (fixed issue #2)
# 2012-07-25 v0.23 PL: - added support for file-like objects (patch by mete0r_kr)
# 2013-05-05 v0.24 PL: - getproperties: added conversion from filetime to python
# datetime
# - main: displays properties with date format
# - new class OleMetadata to parse standard properties
# - added get_metadata method
# 2013-05-07 v0.24 PL: - a few improvements in OleMetadata
# 2013-05-24 v0.25 PL: - getproperties: option to not convert some timestamps
# - OleMetaData: total_edit_time is now a number of seconds,
# not a timestamp
# - getproperties: added support for VT_BOOL, VT_INT, V_UINT
# - getproperties: filter out null chars from strings
# - getproperties: raise non-fatal defects instead of
# exceptions when properties cannot be parsed properly
# 2013-05-27 PL: - getproperties: improved exception handling
# - _raise_defect: added option to set exception type
# - all non-fatal issues are now recorded, and displayed
# when run as a script
# 2013-07-11 v0.26 PL: - added methods to get modification and creation times
# of a directory entry or a storage/stream
# - fixed parsing of direntry timestamps
# 2013-07-24 PL: - new options in listdir to list storages and/or streams
# 2014-02-04 v0.30 PL: - upgraded code to support Python 3.x by Martin Panter
# - several fixes for Python 2.6 (xrange, MAGIC)
# - reused i32 from Pillow's _binary
# 2014-07-18 v0.31 - preliminary support for 4K sectors
# 2014-07-27 v0.31 PL: - a few improvements in OleFileIO.open (header parsing)
# - Fixed loadfat for large files with 4K sectors (issue #3)
# 2014-07-30 v0.32 PL: - added write_sect to write sectors to disk
# - added write_mode option to OleFileIO.__init__ and open
# 2014-07-31 PL: - fixed padding in write_sect for Python 3, added checks
# - added write_stream to write a stream to disk
# 2014-09-26 v0.40 PL: - renamed OleFileIO_PL to olefile
# 2014-11-09 NE: - added support for Jython (Niko Ehrenfeuchter)
# 2014-11-13 v0.41 PL: - improved isOleFile and OleFileIO.open to support OLE
# data in a string buffer and file-like objects.
# 2014-11-21 PL: - updated comments according to Pillow's commits
# 2015-01-24 v0.42 PL: - changed the default path name encoding from Latin-1
# to UTF-8 on Python 2.x (Unicode on Python 3.x)
# - added path_encoding option to override the default
# - fixed a bug in _list when a storage is empty
# 2015-04-17 v0.43 PL: - slight changes in OleDirectoryEntry
# 2015-10-19 - fixed issue #26 in OleFileIO.getproperties
# (using id and type as local variable names)
# 2015-10-29 - replaced debug() with proper logging
# - use optparse to handle command line options
# - improved attribute names in OleFileIO class
# 2015-11-05 - fixed issue #27 by correcting the MiniFAT sector
# cutoff size if invalid.
# 2016-02-02 - logging is disabled by default
# 2016-04-26 v0.44 PL: - added enable_logging
# - renamed _OleDirectoryEntry and _OleStream without '_'
# - in OleStream use _raise_defect instead of exceptions
# 2016-04-27 - added support for incomplete streams and incorrect
# directory entries (to read malformed documents)
# 2016-05-04 - fixed slight bug in OleStream
# 2016-11-27 DR: - added method to get the clsid of a storage/stream
# (Daniel Roethlisberger)
__date__ = "2017-01-06"
__version__ = '0.44'
__author__ = "Philippe Lagadec"
#-----------------------------------------------------------------------------
# TODO (for version 1.0):
# + get rid of print statements, to simplify Python 2.x and 3.x support
# + add is_stream and is_storage
# + remove leading and trailing slashes where a path is used
# + add functions path_list2str and path_str2list
# + fix how all the methods handle unicode str and/or bytes as arguments
# + add path attrib to _OleDirEntry, set it once and for all in init or
# append_kids (then listdir/_list can be simplified)
# - TESTS with Linux, MacOSX, Python 1.5.2, various files, PIL, ...
# - add underscore to each private method, to avoid their display in
# pydoc/epydoc documentation - Remove it for classes to be documented
# - replace all raised exceptions with _raise_defect (at least in OleFileIO)
# - merge code from OleStream and OleFileIO.getsect to read sectors
# (maybe add a class for FAT and MiniFAT ?)
# - add method to check all streams (follow sectors chains without storing all
# stream in memory, and report anomalies)
# - use OleDirectoryEntry.kids_dict to improve _find and _list ?
# - fix Unicode names handling (find some way to stay compatible with Py1.5.2)
# => if possible avoid converting names to Latin-1
# - review DIFAT code: fix handling of DIFSECT blocks in FAT (not stop)
# - rewrite OleFileIO.getproperties
# - improve docstrings to show more sample uses
# - see also original notes and FIXME below
# - remove all obsolete FIXMEs
# - OleMetadata: fix version attrib according to
# https://msdn.microsoft.com/en-us/library/dd945671%28v=office.12%29.aspx
# IDEAS:
# - in OleFileIO._open and OleStream, use size=None instead of 0x7FFFFFFF for
# streams with unknown size
# - use arrays of int instead of long integers for FAT/MiniFAT, to improve
# performance and reduce memory usage ? (possible issue with values >2^31)
# - provide tests with unittest (may need write support to create samples)
# - move all debug code (and maybe dump methods) to a separate module, with
# a class which inherits OleFileIO ?
# - fix docstrings to follow epydoc format
# - add support for big endian byte order ?
# - create a simple OLE explorer with wxPython
# FUTURE EVOLUTIONS to add write support:
# see issue #6 on GitHub:
# https://github.com/decalage2/olefile/issues/6
#-----------------------------------------------------------------------------
# NOTES from PIL 1.1.6:
# History:
# 1997-01-20 fl Created
# 1997-01-22 fl Fixed 64-bit portability quirk
# 2003-09-09 fl Fixed typo in OleFileIO.loadfat (noted by Daniel Haertle)
# 2004-02-29 fl Changed long hex constants to signed integers
#
# Notes:
# FIXME: sort out sign problem (eliminate long hex constants)
# FIXME: change filename to use "a/b/c" instead of ["a", "b", "c"]
# FIXME: provide a glob mechanism function (using fnmatchcase)
#
# Literature:
#
# "FlashPix Format Specification, Appendix A", Kodak and Microsoft,
# September 1996.
#
# Quotes:
#
# "If this document and functionality of the Software conflict,
# the actual functionality of the Software represents the correct
# functionality" -- Microsoft, in the OLE format specification
#------------------------------------------------------------------------------
__all__ = ['isOleFile', 'OleFileIO', 'OleMetadata', 'enable_logging',
'MAGIC', 'STGTY_EMPTY',
'STGTY_STREAM', 'STGTY_STORAGE', 'STGTY_ROOT', 'STGTY_PROPERTY',
'STGTY_LOCKBYTES', 'MINIMAL_OLEFILE_SIZE',]
import io
import sys
import struct, array, os.path, datetime, logging
#=== COMPATIBILITY WORKAROUNDS ================================================
#[PL] Define explicitly the public API to avoid private objects in pydoc:
#TODO: add more
# __all__ = ['OleFileIO', 'isOleFile', 'MAGIC']
# For Python 3.x, need to redefine long as int:
if str is not bytes:
long = int
# Need to make sure we use xrange both on Python 2 and 3.x:
try:
# on Python 2 we need xrange:
iterrange = xrange
except:
# no xrange, for Python 3 it was renamed as range:
iterrange = range
#[PL] workaround to fix an issue with array item size on 64 bits systems:
if array.array('L').itemsize == 4:
# on 32 bits platforms, long integers in an array are 32 bits:
UINT32 = 'L'
elif array.array('I').itemsize == 4:
# on 64 bits platforms, integers in an array are 32 bits:
UINT32 = 'I'
elif array.array('i').itemsize == 4:
# On 64 bit Jython, signed integers ('i') are the only way to store our 32
# bit values in an array in a *somewhat* reasonable way, as the otherwise
    # perfectly suited 'I' (unsigned int, 32 bits) results in a completely
# unusable behaviour. This is most likely caused by the fact that Java
# doesn't have unsigned values, and thus Jython's "array" implementation,
# which is based on "jarray", doesn't have them either.
# NOTE: to trick Jython into converting the values it would normally
# interpret as "signed" into "unsigned", a binary-and operation with
# 0xFFFFFFFF can be used. This way it is possible to use the same comparing
# operations on all platforms / implementations. The corresponding code
# lines are flagged with a 'JYTHON-WORKAROUND' tag below.
UINT32 = 'i'
else:
raise ValueError('Need to fix a bug with 32 bit arrays, please contact author...')
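# A tiny illustration (not used by the module itself) of the masking trick
# described in the Jython comment above: and-ing a possibly signed 32-bit
# array item with 0xFFFFFFFF yields the same unsigned value on CPython and
# Jython, so sector indexes can be compared against constants such as
# ENDOFCHAIN on every implementation.
def _example_unsigned_sector(value):
    # e.g. _example_unsigned_sector(-2) == 0xFFFFFFFE
    return value & 0xFFFFFFFF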
#[PL] These workarounds were inspired from the Path module
# (see http://www.jorendorff.com/articles/python/path/)
try:
basestring
except NameError:
basestring = str
#[PL] Experimental setting: if True, OLE filenames will be kept in Unicode
# if False (default PIL behaviour), all filenames are converted to Latin-1.
KEEP_UNICODE_NAMES = True
if sys.version_info[0] < 3:
# On Python 2.x, the default encoding for path names is UTF-8:
DEFAULT_PATH_ENCODING = 'utf-8'
else:
# On Python 3.x, the default encoding for path names is Unicode (None):
DEFAULT_PATH_ENCODING = None
# === LOGGING =================================================================
class NullHandler(logging.Handler):
"""
Log Handler without output, to avoid printing messages if logging is not
configured by the main application.
Python 2.7 has logging.NullHandler, but this is necessary for 2.6:
see https://docs.python.org/2.6/library/logging.html#configuring-logging-for-a-library
"""
def emit(self, record):
pass
def get_logger(name, level=logging.CRITICAL+1):
"""
Create a suitable logger object for this module.
The goal is not to change settings of the root logger, to avoid getting
other modules' logs on the screen.
    If a logger exists with the same name, reuse it. (Else it would have duplicate
handlers and messages would be doubled.)
The level is set to CRITICAL+1 by default, to avoid any logging.
"""
# First, test if there is already a logger with the same name, else it
# will generate duplicate messages (due to duplicate handlers):
if name in logging.Logger.manager.loggerDict:
#NOTE: another less intrusive but more "hackish" solution would be to
# use getLogger then test if its effective level is not default.
logger = logging.getLogger(name)
# make sure level is OK:
logger.setLevel(level)
return logger
# get a new logger:
logger = logging.getLogger(name)
# only add a NullHandler for this logger, it is up to the application
# to configure its own logging:
logger.addHandler(NullHandler())
logger.setLevel(level)
return logger
# a global logger object used for debugging:
log = get_logger('olefile')
def enable_logging():
"""
Enable logging for this module (disabled by default).
This will set the module-specific logger level to NOTSET, which
means the main application controls the actual logging level.
"""
log.setLevel(logging.NOTSET)
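# A minimal sketch of how a calling application might turn on this module's
# logging; the basicConfig() format string is only an assumption, any standard
# logging configuration works once enable_logging() resets the module level.
# Defined for illustration only and never called here.
def _example_enable_debug_logging():
    logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')
    enable_logging()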
#=== CONSTANTS ===============================================================
#: magic bytes that should be at the beginning of every OLE file:
MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'
#[PL]: added constants for Sector IDs (from AAF specifications)
MAXREGSECT = 0xFFFFFFFA #: (-6) maximum SECT
DIFSECT = 0xFFFFFFFC #: (-4) denotes a DIFAT sector in a FAT
FATSECT = 0xFFFFFFFD #: (-3) denotes a FAT sector in a FAT
ENDOFCHAIN = 0xFFFFFFFE #: (-2) end of a virtual stream chain
FREESECT = 0xFFFFFFFF #: (-1) unallocated sector
#[PL]: added constants for Directory Entry IDs (from AAF specifications)
MAXREGSID = 0xFFFFFFFA #: (-6) maximum directory entry ID
NOSTREAM = 0xFFFFFFFF #: (-1) unallocated directory entry
#[PL] object types in storage (from AAF specifications)
STGTY_EMPTY = 0 #: empty directory entry
STGTY_STORAGE = 1 #: element is a storage object
STGTY_STREAM = 2 #: element is a stream object
STGTY_LOCKBYTES = 3 #: element is an ILockBytes object
STGTY_PROPERTY = 4 #: element is an IPropertyStorage object
STGTY_ROOT = 5 #: element is a root storage
# Unknown size for a stream (used by OleStream):
UNKNOWN_SIZE = 0x7FFFFFFF
#
# --------------------------------------------------------------------
# property types
VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6;
VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11;
VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17;
VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23;
VT_VOID=24; VT_HRESULT=25; VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28;
VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64;
VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68;
VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72;
VT_VECTOR=0x1000;
# map property id to name (for debugging purposes)
VT = {}
for keyword, var in list(vars().items()):
if keyword[:3] == "VT_":
VT[var] = keyword
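# Small illustration (never called here) of the reverse lookup built above:
# the VT dict maps a numeric property type id back to its symbolic name.
def _example_vt_name():
    return VT[VT_FILETIME]  # -> 'VT_FILETIME'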
#
# --------------------------------------------------------------------
# Some common document types (root.clsid fields)
WORD_CLSID = "00020900-0000-0000-C000-000000000046"
#TODO: check Excel, PPT, ...
#[PL]: Defect levels to classify parsing errors - see OleFileIO._raise_defect()
DEFECT_UNSURE = 10 # a case which looks weird, but not sure it's a defect
DEFECT_POTENTIAL = 20 # a potential defect
DEFECT_INCORRECT = 30 # an error according to specifications, but parsing
# can go on
DEFECT_FATAL = 40 # an error which cannot be ignored, parsing is
# impossible
# Minimal size of an empty OLE file, with 512-bytes sectors = 1536 bytes
# (this is used in isOleFile and OleFile.open)
MINIMAL_OLEFILE_SIZE = 1536
#[PL] add useful constants to __all__:
# for key in list(vars().keys()):
# if key.startswith('STGTY_') or key.startswith('DEFECT_'):
# __all__.append(key)
#=== FUNCTIONS ===============================================================
def isOleFile (filename):
"""
Test if a file is an OLE container (according to the magic bytes in its header).
.. note::
This function only checks the first 8 bytes of the file, not the
rest of the OLE structure.
.. versionadded:: 0.16
:param filename: filename, contents or file-like object of the OLE file (string-like or file-like object)
- if filename is a string smaller than 1536 bytes, it is the path
of the file to open. (bytes or unicode string)
- if filename is a string longer than 1535 bytes, it is parsed
as the content of an OLE file in memory. (bytes type only)
- if filename is a file-like object (with read and seek methods),
it is parsed as-is.
:type filename: bytes or str or unicode or file
:returns: True if OLE, False otherwise.
:rtype: bool
"""
# check if filename is a string-like or file-like object:
if hasattr(filename, 'read'):
# file-like object: use it directly
header = filename.read(len(MAGIC))
# just in case, seek back to start of file:
filename.seek(0)
elif isinstance(filename, bytes) and len(filename) >= MINIMAL_OLEFILE_SIZE:
# filename is a bytes string containing the OLE file to be parsed:
header = filename[:len(MAGIC)]
else:
# string-like object: filename of file on disk
with open(filename, 'rb') as fp:
header = fp.read(len(MAGIC))
if header == MAGIC:
return True
else:
return False
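# A hedged usage sketch for isOleFile(); 'document.doc' is a hypothetical path
# and the in-memory buffer below simply reuses the MAGIC header defined above.
# The helper is defined for illustration only and never called by the module.
def _example_isolefile(path='document.doc'):
    if isOleFile(path):
        print('%s looks like an OLE2 container' % path)
    # a bytes buffer can be tested too, provided it is at least
    # MINIMAL_OLEFILE_SIZE (1536) bytes long:
    data = MAGIC + b'\x00' * (MINIMAL_OLEFILE_SIZE - len(MAGIC))
    return isOleFile(data)  # -> True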
if bytes is str:
# version for Python 2.x
def i8(c):
return ord(c)
else:
# version for Python 3.x
def i8(c):
return c if c.__class__ is int else c[0]
#TODO: replace i16 and i32 with more readable struct.unpack equivalent?
def i16(c, o = 0):
"""
Converts a 2-bytes (16 bits) string to an integer.
c: string containing bytes to convert
o: offset of bytes to convert in string
"""
return struct.unpack("<H", c[o:o+2])[0]
def i32(c, o = 0):
"""
Converts a 4-bytes (32 bits) string to an integer.
c: string containing bytes to convert
o: offset of bytes to convert in string
"""
return struct.unpack("<I", c[o:o+4])[0]
def _clsid(clsid):
"""
Converts a CLSID to a human-readable string.
:param clsid: string of length 16.
"""
assert len(clsid) == 16
# if clsid is only made of null bytes, return an empty string:
# (PL: why not simply return the string with zeroes?)
if not clsid.strip(b"\0"):
return ""
return (("%08X-%04X-%04X-%02X%02X-" + "%02X" * 6) %
((i32(clsid, 0), i16(clsid, 4), i16(clsid, 6)) +
tuple(map(i8, clsid[8:16]))))
def filetime2datetime(filetime):
"""
convert FILETIME (64 bits int) to Python datetime.datetime
"""
# TODO: manage exception when microseconds is too large
    # inspired by https://code.activestate.com/recipes/511425-filetime-to-datetime/
_FILETIME_null_date = datetime.datetime(1601, 1, 1, 0, 0, 0)
#log.debug('timedelta days=%d' % (filetime//(10*1000000*3600*24)))
return _FILETIME_null_date + datetime.timedelta(microseconds=filetime//10)
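# Quick sanity-check sketch: 116444736000000000 is the number of 100 ns
# intervals between 1601-01-01 and 1970-01-01, so it should map to the Unix
# epoch. Defined for illustration only, never executed by the module.
def _example_filetime_epoch():
    return filetime2datetime(116444736000000000)  # datetime.datetime(1970, 1, 1, 0, 0)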
#=== CLASSES ==================================================================
class OleMetadata:
"""
class to parse and store metadata from standard properties of OLE files.
Available attributes:
codepage, title, subject, author, keywords, comments, template,
last_saved_by, revision_number, total_edit_time, last_printed, create_time,
last_saved_time, num_pages, num_words, num_chars, thumbnail,
creating_application, security, codepage_doc, category, presentation_target,
bytes, lines, paragraphs, slides, notes, hidden_slides, mm_clips,
scale_crop, heading_pairs, titles_of_parts, manager, company, links_dirty,
chars_with_spaces, unused, shared_doc, link_base, hlinks, hlinks_changed,
version, dig_sig, content_type, content_status, language, doc_version
Note: an attribute is set to None when not present in the properties of the
OLE file.
References for SummaryInformation stream:
- https://msdn.microsoft.com/en-us/library/dd942545.aspx
- https://msdn.microsoft.com/en-us/library/dd925819%28v=office.12%29.aspx
- https://msdn.microsoft.com/en-us/library/windows/desktop/aa380376%28v=vs.85%29.aspx
- https://msdn.microsoft.com/en-us/library/aa372045.aspx
- http://sedna-soft.de/articles/summary-information-stream/
- https://poi.apache.org/apidocs/org/apache/poi/hpsf/SummaryInformation.html
References for DocumentSummaryInformation stream:
- https://msdn.microsoft.com/en-us/library/dd945671%28v=office.12%29.aspx
- https://msdn.microsoft.com/en-us/library/windows/desktop/aa380374%28v=vs.85%29.aspx
- https://poi.apache.org/apidocs/org/apache/poi/hpsf/DocumentSummaryInformation.html
new in version 0.25
"""
# attribute names for SummaryInformation stream properties:
# (ordered by property id, starting at 1)
SUMMARY_ATTRIBS = ['codepage', 'title', 'subject', 'author', 'keywords', 'comments',
'template', 'last_saved_by', 'revision_number', 'total_edit_time',
'last_printed', 'create_time', 'last_saved_time', 'num_pages',
'num_words', 'num_chars', 'thumbnail', 'creating_application',
'security']
# attribute names for DocumentSummaryInformation stream properties:
# (ordered by property id, starting at 1)
DOCSUM_ATTRIBS = ['codepage_doc', 'category', 'presentation_target', 'bytes', 'lines', 'paragraphs',
'slides', 'notes', 'hidden_slides', 'mm_clips',
'scale_crop', 'heading_pairs', 'titles_of_parts', 'manager',
'company', 'links_dirty', 'chars_with_spaces', 'unused', 'shared_doc',
'link_base', 'hlinks', 'hlinks_changed', 'version', 'dig_sig',
'content_type', 'content_status', 'language', 'doc_version']
def __init__(self):
"""
Constructor for OleMetadata
All attributes are set to None by default
"""
# properties from SummaryInformation stream
self.codepage = None
self.title = None
self.subject = None
self.author = None
self.keywords = None
self.comments = None
self.template = None
self.last_saved_by = None
self.revision_number = None
self.total_edit_time = None
self.last_printed = None
self.create_time = None
self.last_saved_time = None
self.num_pages = None
self.num_words = None
self.num_chars = None
self.thumbnail = None
self.creating_application = None
self.security = None
# properties from DocumentSummaryInformation stream
self.codepage_doc = None
self.category = None
self.presentation_target = None
self.bytes = None
self.lines = None
self.paragraphs = None
self.slides = None
self.notes = None
self.hidden_slides = None
self.mm_clips = None
self.scale_crop = None
self.heading_pairs = None
self.titles_of_parts = None
self.manager = None
self.company = None
self.links_dirty = None
self.chars_with_spaces = None
self.unused = None
self.shared_doc = None
self.link_base = None
self.hlinks = None
self.hlinks_changed = None
self.version = None
self.dig_sig = None
self.content_type = None
self.content_status = None
self.language = None
self.doc_version = None
def parse_properties(self, olefile):
"""
Parse standard properties of an OLE file, from the streams
``\\x05SummaryInformation`` and ``\\x05DocumentSummaryInformation``,
if present.
Properties are converted to strings, integers or python datetime objects.
If a property is not present, its value is set to None.
"""
# first set all attributes to None:
for attrib in (self.SUMMARY_ATTRIBS + self.DOCSUM_ATTRIBS):
setattr(self, attrib, None)
if olefile.exists("\x05SummaryInformation"):
# get properties from the stream:
# (converting timestamps to python datetime, except total_edit_time,
# which is property #10)
props = olefile.getproperties("\x05SummaryInformation",
convert_time=True, no_conversion=[10])
# store them into this object's attributes:
for i in range(len(self.SUMMARY_ATTRIBS)):
# ids for standards properties start at 0x01, until 0x13
value = props.get(i+1, None)
setattr(self, self.SUMMARY_ATTRIBS[i], value)
if olefile.exists("\x05DocumentSummaryInformation"):
# get properties from the stream:
props = olefile.getproperties("\x05DocumentSummaryInformation",
convert_time=True)
# store them into this object's attributes:
for i in range(len(self.DOCSUM_ATTRIBS)):
# ids for standards properties start at 0x01, until 0x13
value = props.get(i+1, None)
setattr(self, self.DOCSUM_ATTRIBS[i], value)
def dump(self):
"""
Dump all metadata, for debugging purposes.
"""
print('Properties from SummaryInformation stream:')
for prop in self.SUMMARY_ATTRIBS:
value = getattr(self, prop)
print('- %s: %s' % (prop, repr(value)))
print('Properties from DocumentSummaryInformation stream:')
for prop in self.DOCSUM_ATTRIBS:
value = getattr(self, prop)
print('- %s: %s' % (prop, repr(value)))
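# A hedged sketch of how the metadata above is normally obtained through the
# OleFileIO class defined further below in this module; 'report.doc' is a
# placeholder path. Illustrative only, never called by the module itself.
def _example_read_metadata(path='report.doc'):
    ole = OleFileIO(path)
    meta = ole.get_metadata()  # parses both property streams into OleMetadata
    ole.close()
    return meta.author, meta.create_time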
#--- OleStream ---------------------------------------------------------------
class OleStream(io.BytesIO):
"""
OLE2 Stream
Returns a read-only file object which can be used to read
the contents of a OLE stream (instance of the BytesIO class).
To open a stream, use the openstream method in the OleFile class.
This function can be used with either ordinary streams,
or ministreams, depending on the offset, sectorsize, and
fat table arguments.
Attributes:
- size: actual size of data stream, after it was opened.
"""
# FIXME: should store the list of sects obtained by following
# the fat chain, and load new sectors on demand instead of
# loading it all in one go.
def __init__(self, fp, sect, size, offset, sectorsize, fat, filesize, olefileio):
"""
Constructor for OleStream class.
:param fp: file object, the OLE container or the MiniFAT stream
:param sect: sector index of first sector in the stream
:param size: total size of the stream
:param offset: offset in bytes for the first FAT or MiniFAT sector
:param sectorsize: size of one sector
:param fat: array/list of sector indexes (FAT or MiniFAT)
:param filesize: size of OLE file (for debugging)
:param olefileio: OleFileIO object containing this stream
:returns: a BytesIO instance containing the OLE stream
"""
log.debug('OleStream.__init__:')
log.debug(' sect=%d (%X), size=%d, offset=%d, sectorsize=%d, len(fat)=%d, fp=%s'
%(sect,sect,size,offset,sectorsize,len(fat), repr(fp)))
self.ole = olefileio
#[PL] To detect malformed documents with FAT loops, we compute the
# expected number of sectors in the stream:
unknown_size = False
if size == UNKNOWN_SIZE:
# this is the case when called from OleFileIO._open(), and stream
# size is not known in advance (for example when reading the
# Directory stream). Then we can only guess maximum size:
size = len(fat)*sectorsize
# and we keep a record that size was unknown:
unknown_size = True
log.debug(' stream with UNKNOWN SIZE')
nb_sectors = (size + (sectorsize-1)) // sectorsize
log.debug('nb_sectors = %d' % nb_sectors)
# This number should (at least) be less than the total number of
# sectors in the given FAT:
if nb_sectors > len(fat):
self.ole._raise_defect(DEFECT_INCORRECT, 'malformed OLE document, stream too large')
# optimization(?): data is first a list of strings, and join() is called
# at the end to concatenate all in one string.
# (this may not be really useful with recent Python versions)
data = []
# if size is zero, then first sector index should be ENDOFCHAIN:
if size == 0 and sect != ENDOFCHAIN:
log.debug('size == 0 and sect != ENDOFCHAIN:')
self.ole._raise_defect(DEFECT_INCORRECT, 'incorrect OLE sector index for empty stream')
#[PL] A fixed-length for loop is used instead of an undefined while
# loop to avoid DoS attacks:
for i in range(nb_sectors):
log.debug('Reading stream sector[%d] = %Xh' % (i, sect))
# Sector index may be ENDOFCHAIN, but only if size was unknown
if sect == ENDOFCHAIN:
if unknown_size:
log.debug('Reached ENDOFCHAIN sector for stream with unknown size')
break
else:
# else this means that the stream is smaller than declared:
log.debug('sect=ENDOFCHAIN before expected size')
self.ole._raise_defect(DEFECT_INCORRECT, 'incomplete OLE stream')
# sector index should be within FAT:
if sect<0 or sect>=len(fat):
log.debug('sect=%d (%X) / len(fat)=%d' % (sect, sect, len(fat)))
log.debug('i=%d / nb_sectors=%d' %(i, nb_sectors))
## tmp_data = b"".join(data)
## f = open('test_debug.bin', 'wb')
## f.write(tmp_data)
## f.close()
## log.debug('data read so far: %d bytes' % len(tmp_data))
self.ole._raise_defect(DEFECT_INCORRECT, 'incorrect OLE FAT, sector index out of range')
# stop reading here if the exception is ignored:
break
#TODO: merge this code with OleFileIO.getsect() ?
#TODO: check if this works with 4K sectors:
try:
fp.seek(offset + sectorsize * sect)
except:
log.debug('sect=%d, seek=%d, filesize=%d' %
(sect, offset+sectorsize*sect, filesize))
self.ole._raise_defect(DEFECT_INCORRECT, 'OLE sector index out of range')
# stop reading here if the exception is ignored:
break
sector_data = fp.read(sectorsize)
# [PL] check if there was enough data:
# Note: if sector is the last of the file, sometimes it is not a
# complete sector (of 512 or 4K), so we may read less than
# sectorsize.
if len(sector_data)!=sectorsize and sect!=(len(fat)-1):
log.debug('sect=%d / len(fat)=%d, seek=%d / filesize=%d, len read=%d' %
(sect, len(fat), offset+sectorsize*sect, filesize, len(sector_data)))
log.debug('seek+len(read)=%d' % (offset+sectorsize*sect+len(sector_data)))
self.ole._raise_defect(DEFECT_INCORRECT, 'incomplete OLE sector')
data.append(sector_data)
# jump to next sector in the FAT:
try:
sect = fat[sect] & 0xFFFFFFFF # JYTHON-WORKAROUND
except IndexError:
# [PL] if pointer is out of the FAT an exception is raised
self.ole._raise_defect(DEFECT_INCORRECT, 'incorrect OLE FAT, sector index out of range')
# stop reading here if the exception is ignored:
break
#[PL] Last sector should be a "end of chain" marker:
# if sect != ENDOFCHAIN:
# raise IOError('incorrect last sector index in OLE stream')
data = b"".join(data)
# Data is truncated to the actual stream size:
if len(data) >= size:
log.debug('Read data of length %d, truncated to stream size %d' % (len(data), size))
data = data[:size]
# actual stream size is stored for future use:
self.size = size
elif unknown_size:
# actual stream size was not known, now we know the size of read
# data:
log.debug('Read data of length %d, the stream size was unknown' % len(data))
self.size = len(data)
else:
# read data is less than expected:
log.debug('Read data of length %d, less than expected stream size %d' % (len(data), size))
# TODO: provide details in exception message
self.size = len(data)
self.ole._raise_defect(DEFECT_INCORRECT, 'OLE stream size is less than declared')
# when all data is read in memory, BytesIO constructor is called
io.BytesIO.__init__(self, data)
# Then the OleStream object can be used as a read-only file object.
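# Conceptual sketch (not part of the class) of the FAT chain walk performed above,
# assuming `fat` is a sequence of 32-bit sector indexes and ENDOFCHAIN terminates a chain:
#
#     def follow_chain(fat, first_sect):
#         """Return the ordered list of sector indexes of one stream."""
#         chain = []
#         sect = first_sect
#         for _ in range(len(fat)):      # bounded loop, like the DoS protection above
#             if sect == ENDOFCHAIN:
#                 break
#             chain.append(sect)
#             sect = fat[sect] & 0xFFFFFFFF
#         return chain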
#--- OleDirectoryEntry -------------------------------------------------------
class OleDirectoryEntry:
"""
OLE2 Directory Entry
"""
#[PL] parsing code moved from OleFileIO.loaddirectory
# struct to parse directory entries:
# <: little-endian byte order, standard sizes
# (note: this should guarantee that Q returns a 64 bits int)
# 64s: string containing entry name in unicode UTF-16 (max 31 chars) + null char = 64 bytes
# H: uint16, number of bytes used in name buffer, including null = (len+1)*2
# B: uint8, dir entry type (between 0 and 5)
# B: uint8, color: 0=black, 1=red
# I: uint32, index of left child node in the red-black tree, NOSTREAM if none
# I: uint32, index of right child node in the red-black tree, NOSTREAM if none
# I: uint32, index of child root node if it is a storage, else NOSTREAM
# 16s: CLSID, unique identifier (only used if it is a storage)
# I: uint32, user flags
# Q (was 8s): uint64, creation timestamp or zero
# Q (was 8s): uint64, modification timestamp or zero
# I: uint32, SID of first sector if stream or ministream, SID of 1st sector
# of stream containing ministreams if root entry, 0 otherwise
# I: uint32, total stream size in bytes if stream (low 32 bits), 0 otherwise
# I: uint32, total stream size in bytes if stream (high 32 bits), 0 otherwise
STRUCT_DIRENTRY = '<64sHBBIII16sIQQIII'
# size of a directory entry: 128 bytes
DIRENTRY_SIZE = 128
assert struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE
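# Hedged worked example of the 128-byte layout above, built with synthetic values
# (none of them come from a real document):
#
#     import struct
#     name = 'Root Entry'.encode('utf-16-le')          # 10 chars -> 20 bytes
#     raw = struct.pack(OleDirectoryEntry.STRUCT_DIRENTRY,
#                       name,                          # 64s: padded with nulls by struct
#                       (10 + 1) * 2,                  # H: bytes used incl. null = 22
#                       5, 1,                          # B, B: STGTY_ROOT, color red
#                       0xFFFFFFFF, 0xFFFFFFFF, 1,     # I, I, I: left/right NOSTREAM, child sid 1
#                       b'\x00' * 16, 0,               # 16s CLSID, I user flags
#                       0, 0,                          # Q, Q: creation/modification timestamps
#                       3, 4096, 0)                    # I, I, I: isectStart, sizeLow, sizeHigh
#     assert len(raw) == OleDirectoryEntry.DIRENTRY_SIZE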
def __init__(self, entry, sid, olefile):
"""
Constructor for an OleDirectoryEntry object.
Parses a 128-bytes entry from the OLE Directory stream.
:param entry: string (must be 128 bytes long)
:param sid: index of this directory entry in the OLE file directory
:param olefile: OleFileIO containing this directory entry
"""
self.sid = sid
# ref to olefile is stored for future use
self.olefile = olefile
# kids is a list of children entries, if this entry is a storage:
# (list of OleDirectoryEntry objects)
self.kids = []
# kids_dict is a dictionary of children entries, indexed by their
# name in lowercase: used to quickly find an entry, and to detect
# duplicates
self.kids_dict = {}
# flag used to detect if the entry is referenced more than once in
# directory:
self.used = False
# decode DirEntry
(
self.name_raw, # 64s: string containing entry name in unicode UTF-16 (max 31 chars) + null char = 64 bytes
self.namelength, # H: uint16, number of bytes used in name buffer, including null = (len+1)*2
self.entry_type,
self.color,
self.sid_left,
self.sid_right,
self.sid_child,
clsid,
self.dwUserFlags,
self.createTime,
self.modifyTime,
self.isectStart,
self.sizeLow,
self.sizeHigh
) = struct.unpack(OleDirectoryEntry.STRUCT_DIRENTRY, entry)
if self.entry_type not in [STGTY_ROOT, STGTY_STORAGE, STGTY_STREAM, STGTY_EMPTY]:
olefile._raise_defect(DEFECT_INCORRECT, 'unhandled OLE storage type')
# only first directory entry can (and should) be root:
if self.entry_type == STGTY_ROOT and sid != 0:
olefile._raise_defect(DEFECT_INCORRECT, 'duplicate OLE root entry')
if sid == 0 and self.entry_type != STGTY_ROOT:
olefile._raise_defect(DEFECT_INCORRECT, 'incorrect OLE root entry')
#log.debug(struct.unpack(fmt_entry, entry[:len_entry]))
# name should be at most 31 unicode characters + null character,
# so 64 bytes in total (31*2 + 2):
if self.namelength>64:
olefile._raise_defect(DEFECT_INCORRECT, 'incorrect DirEntry name length >64 bytes')
# if exception not raised, namelength is set to the maximum value:
self.namelength = 64
# only characters without ending null char are kept:
self.name_utf16 = self.name_raw[:(self.namelength-2)]
#TODO: check if the name is actually followed by a null unicode character ([MS-CFB] 2.6.1)
#TODO: check if the name does not contain forbidden characters:
# [MS-CFB] 2.6.1: "The following characters are illegal and MUST NOT be part of the name: '/', '\', ':', '!'."
# name is converted from UTF-16LE to the path encoding specified in the OleFileIO:
self.name = olefile._decode_utf16_str(self.name_utf16)
log.debug('DirEntry SID=%d: %s' % (self.sid, repr(self.name)))
log.debug(' - type: %d' % self.entry_type)
log.debug(' - sect: %Xh' % self.isectStart)
log.debug(' - SID left: %d, right: %d, child: %d' % (self.sid_left,
self.sid_right, self.sid_child))
# sizeHigh is only used for 4K sectors, it should be zero for 512 bytes
# sectors, BUT apparently some implementations set it as 0xFFFFFFFF, 1
# or some other value so it cannot be raised as a defect in general:
if olefile.sectorsize == 512:
if self.sizeHigh != 0 and self.sizeHigh != 0xFFFFFFFF:
log.debug('sectorsize=%d, sizeLow=%d, sizeHigh=%d (%X)' %
(olefile.sectorsize, self.sizeLow, self.sizeHigh, self.sizeHigh))
olefile._raise_defect(DEFECT_UNSURE, 'incorrect OLE stream size')
self.size = self.sizeLow
else:
self.size = self.sizeLow + (long(self.sizeHigh)<<32)
log.debug(' - size: %d (sizeLow=%d, sizeHigh=%d)' % (self.size, self.sizeLow, self.sizeHigh))
self.clsid = _clsid(clsid)
# a storage should have a null size, BUT some implementations such as
# Word 8 for Mac seem to allow non-null values => Potential defect:
if self.entry_type == STGTY_STORAGE and self.size != 0:
olefile._raise_defect(DEFECT_POTENTIAL, 'OLE storage with size>0')
# check if stream is not already referenced elsewhere:
if self.entry_type in (STGTY_ROOT, STGTY_STREAM) and self.size>0:
if self.size < olefile.minisectorcutoff \
and self.entry_type==STGTY_STREAM: # only streams can be in MiniFAT
# ministream object
minifat = True
else:
minifat = False
olefile._check_duplicate_stream(self.isectStart, minifat)
def build_storage_tree(self):
"""
Read and build the red-black tree attached to this OleDirectoryEntry
object, if it is a storage.
Note that this method builds a tree of all subentries, so it should
only be called for the root object once.
"""
log.debug('build_storage_tree: SID=%d - %s - sid_child=%d'
% (self.sid, repr(self.name), self.sid_child))
if self.sid_child != NOSTREAM:
# if child SID is not NOSTREAM, then this entry is a storage.
# Let's walk through the tree of children to fill the kids list:
self.append_kids(self.sid_child)
# Note from OpenOffice documentation: the safest way is to
# recreate the tree because some implementations may store broken
# red-black trees...
# in the OLE file, entries are sorted on (length, name).
# for convenience, we sort them on name instead:
# (see rich comparison methods in this class)
self.kids.sort()
def append_kids(self, child_sid):
"""
Walk through red-black tree of children of this directory entry to add
all of them to the kids list. (recursive method)
:param child_sid: index of child directory entry to use, or None when called
first time for the root. (only used during recursion)
"""
log.debug('append_kids: child_sid=%d' % child_sid)
#[PL] this method was added to use simple recursion instead of a complex
# algorithm.
# if this is not a storage or a leaf of the tree, nothing to do:
if child_sid == NOSTREAM:
return
# check if child SID is in the proper range:
if child_sid<0 or child_sid>=len(self.olefile.direntries):
self.olefile._raise_defect(DEFECT_INCORRECT, 'OLE DirEntry index out of range')
else:
# get child direntry:
child = self.olefile._load_direntry(child_sid) #direntries[child_sid]
log.debug('append_kids: child_sid=%d - %s - sid_left=%d, sid_right=%d, sid_child=%d'
% (child.sid, repr(child.name), child.sid_left, child.sid_right, child.sid_child))
# the directory entries are organized as a red-black tree.
# (cf. Wikipedia for details)
# First walk through left side of the tree:
self.append_kids(child.sid_left)
# Check if its name is not already used (case-insensitive):
name_lower = child.name.lower()
if name_lower in self.kids_dict:
self.olefile._raise_defect(DEFECT_INCORRECT,
"Duplicate filename in OLE storage")
# Then the child_sid OleDirectoryEntry object is appended to the
# kids list and dictionary:
self.kids.append(child)
self.kids_dict[name_lower] = child
# Check if kid was not already referenced in a storage:
if child.used:
self.olefile._raise_defect(DEFECT_INCORRECT,
'OLE Entry referenced more than once')
child.used = True
# Finally walk through right side of the tree:
self.append_kids(child.sid_right)
# Afterwards build kid's own tree if it's also a storage:
child.build_storage_tree()
def __eq__(self, other):
"Compare entries by name"
return self.name == other.name
def __lt__(self, other):
"Compare entries by name"
return self.name < other.name
def __ne__(self, other):
return not self.__eq__(other)
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
# Reflected __lt__() and __le__() will be used for __gt__() and __ge__()
#TODO: replace by the same function as MS implementation ?
# (order by name length first, then case-insensitive order)
def dump(self, tab = 0):
"Dump this entry, and all its subentries (for debug purposes only)"
TYPES = ["(invalid)", "(storage)", "(stream)", "(lockbytes)",
"(property)", "(root)"]
print(" "*tab + repr(self.name), TYPES[self.entry_type], end=' ')
if self.entry_type in (STGTY_STREAM, STGTY_ROOT):
print(self.size, "bytes", end=' ')
print()
if self.entry_type in (STGTY_STORAGE, STGTY_ROOT) and self.clsid:
print(" "*tab + "{%s}" % self.clsid)
for kid in self.kids:
kid.dump(tab + 2)
def getmtime(self):
"""
Return modification time of a directory entry.
:returns: None if modification time is null, a python datetime object
otherwise (UTC timezone)
new in version 0.26
"""
if self.modifyTime == 0:
return None
return filetime2datetime(self.modifyTime)
def getctime(self):
"""
Return creation time of a directory entry.
:returns: None if creation time is null, a python datetime object
otherwise (UTC timezone)
new in version 0.26
"""
if self.createTime == 0:
return None
return filetime2datetime(self.createTime)
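# Hedged worked example of the FILETIME conversion behind getmtime()/getctime();
# filetime2datetime is assumed to map "100ns ticks since 1601-01-01 UTC" to datetime:
#
#     import datetime
#     ticks = 132539328000000000                        # illustrative 64-bit FILETIME value
#     dt = datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=ticks // 10)
#     # dt == datetime.datetime(2021, 1, 1, 0, 0); a value of 0 means "no timestamp" -> None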
#--- OleFileIO ----------------------------------------------------------------
class OleFileIO:
"""
OLE container object
This class encapsulates the interface to an OLE 2 structured
storage file. Use the listdir and openstream methods to
access the contents of this file.
Object names are given as a list of strings, one for each subentry
level. The root entry should be omitted. For example, the following
code extracts all image streams from a Microsoft Image Composer file::
ole = OleFileIO("fan.mic")
for entry in ole.listdir():
if entry[1:2] == "Image":
fin = ole.openstream(entry)
fout = open(entry[0:1], "wb")
while True:
s = fin.read(8192)
if not s:
break
fout.write(s)
You can use the viewer application provided with the Python Imaging
Library to view the resulting files (which happens to be standard
TIFF files).
"""
def __init__(self, filename=None, raise_defects=DEFECT_FATAL,
write_mode=False, debug=False, path_encoding=DEFAULT_PATH_ENCODING):
"""
Constructor for the OleFileIO class.
:param filename: file to open.
- if filename is a string smaller than 1536 bytes, it is the path
of the file to open. (bytes or unicode string)
- if filename is a string longer than 1535 bytes, it is parsed
as the content of an OLE file in memory. (bytes type only)
- if filename is a file-like object (with read, seek and tell methods),
it is parsed as-is.
:param raise_defects: minimal level for defects to be raised as exceptions.
(use DEFECT_FATAL for a typical application, DEFECT_INCORRECT for a
security-oriented application, see source code for details)
:param write_mode: bool, if True the file is opened in read/write mode instead
of read-only by default.
:param debug: bool, set debug mode (deprecated, not used anymore)
:param path_encoding: None or str, name of the codec to use for path
names (streams and storages), or None for Unicode.
Unicode by default on Python 3+, UTF-8 on Python 2.x.
(new in olefile 0.42, was hardcoded to Latin-1 until olefile v0.41)
"""
# minimal level for defects to be raised as exceptions:
self._raise_defects_level = raise_defects
#: list of defects/issues not raised as exceptions:
#: tuples of (exception type, message)
self.parsing_issues = []
self.write_mode = write_mode
self.path_encoding = path_encoding
self._filesize = None
self.fp = None
if filename:
self.open(filename, write_mode=write_mode)
def _raise_defect(self, defect_level, message, exception_type=IOError):
"""
This method should be called for any defect found during file parsing.
It may raise an IOError exception according to the minimal level chosen
for the OleFileIO object.
:param defect_level: defect level, possible values are:
- DEFECT_UNSURE : a case which looks weird, but not sure it's a defect
- DEFECT_POTENTIAL : a potential defect
- DEFECT_INCORRECT : an error according to specifications, but parsing can go on
- DEFECT_FATAL : an error which cannot be ignored, parsing is impossible
:param message: string describing the defect, used with raised exception.
:param exception_type: exception class to be raised, IOError by default
"""
# added by [PL]
if defect_level >= self._raise_defects_level:
log.error(message)
raise exception_type(message)
else:
# just record the issue, no exception raised:
self.parsing_issues.append((exception_type, message))
log.warning(message)
def _decode_utf16_str(self, utf16_str, errors='replace'):
"""
Decode a string encoded in UTF-16 LE format, as found in the OLE
directory or in property streams. Return a string encoded
according to the path_encoding specified for the OleFileIO object.
:param utf16_str: bytes string encoded in UTF-16 LE format
:param errors: str, see python documentation for str.decode()
:return: str, encoded according to path_encoding
"""
unicode_str = utf16_str.decode('UTF-16LE', errors)
if self.path_encoding:
# an encoding has been specified for path names:
return unicode_str.encode(self.path_encoding, errors)
else:
# path_encoding=None, return the Unicode string as-is:
return unicode_str
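# Hedged example of the decoding above (values are illustrative):
#
#     raw = 'Root Entry'.encode('utf-16-le')
#     ole._decode_utf16_str(raw)      # -> 'Root Entry' when path_encoding is None (Python 3 default)
#                                     # -> b'Root Entry' when path_encoding='utf-8'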
def open(self, filename, write_mode=False):
"""
Open an OLE2 file in read-only or read/write mode.
Read and parse the header, FAT and directory.
:param filename: string-like or file-like object, OLE file to parse
- if filename is a string smaller than 1536 bytes, it is the path
of the file to open. (bytes or unicode string)
- if filename is a string longer than 1535 bytes, it is parsed
as the content of an OLE file in memory. (bytes type only)
- if filename is a file-like object (with read, seek and tell methods),
it is parsed as-is.
:param write_mode: bool, if True the file is opened in read/write mode instead
of read-only by default. (ignored if filename is not a path)
"""
self.write_mode = write_mode
#[PL] check if filename is a string-like or file-like object:
# (it is better to check for a read() method)
if hasattr(filename, 'read'):
#TODO: also check seek and tell methods?
# file-like object: use it directly
self.fp = filename
elif isinstance(filename, bytes) and len(filename) >= MINIMAL_OLEFILE_SIZE:
# filename is a bytes string containing the OLE file to be parsed:
# convert it to BytesIO
self.fp = io.BytesIO(filename)
else:
# string-like object: filename of file on disk
if self.write_mode:
# open file in mode 'read with update, binary'
# According to https://docs.python.org/2/library/functions.html#open
# 'w' would truncate the file, 'a' may only append on some Unixes
mode = 'r+b'
else:
# read-only mode by default
mode = 'rb'
self.fp = open(filename, mode)
# obtain the filesize by using seek and tell, which should work on most
# file-like objects:
#TODO: do it above, using getsize with filename when possible?
#TODO: fix code to fail with clear exception when filesize cannot be obtained
filesize=0
self.fp.seek(0, os.SEEK_END)
try:
filesize = self.fp.tell()
finally:
self.fp.seek(0)
self._filesize = filesize
log.debug('File size: %d bytes (%Xh)' % (self._filesize, self._filesize))
# lists of streams in FAT and MiniFAT, to detect duplicate references
# (list of indexes of first sectors of each stream)
self._used_streams_fat = []
self._used_streams_minifat = []
header = self.fp.read(512)
if len(header) != 512 or header[:8] != MAGIC:
log.debug('Magic = %r instead of %r' % (header[:8], MAGIC))
self._raise_defect(DEFECT_FATAL, "not an OLE2 structured storage file")
# [PL] header structure according to AAF specifications:
##Header
##struct StructuredStorageHeader { // [offset from start (bytes), length (bytes)]
##BYTE _abSig[8]; // [00H,08] {0xd0, 0xcf, 0x11, 0xe0, 0xa1, 0xb1,
## // 0x1a, 0xe1} for current version
##CLSID _clsid; // [08H,16] reserved must be zero (WriteClassStg/
## // GetClassFile uses root directory class id)
##USHORT _uMinorVersion; // [18H,02] minor version of the format: 33 is
## // written by reference implementation
##USHORT _uDllVersion; // [1AH,02] major version of the dll/format: 3 for
## // 512-byte sectors, 4 for 4 KB sectors
##USHORT _uByteOrder; // [1CH,02] 0xFFFE: indicates Intel byte-ordering
##USHORT _uSectorShift; // [1EH,02] size of sectors in power-of-two;
## // typically 9 indicating 512-byte sectors
##USHORT _uMiniSectorShift; // [20H,02] size of mini-sectors in power-of-two;
## // typically 6 indicating 64-byte mini-sectors
##USHORT _usReserved; // [22H,02] reserved, must be zero
##ULONG _ulReserved1; // [24H,04] reserved, must be zero
##FSINDEX _csectDir; // [28H,04] must be zero for 512-byte sectors,
## // number of SECTs in directory chain for 4 KB
## // sectors
##FSINDEX _csectFat; // [2CH,04] number of SECTs in the FAT chain
##SECT _sectDirStart; // [30H,04] first SECT in the directory chain
##DFSIGNATURE _signature; // [34H,04] signature used for transactions; must
## // be zero. The reference implementation
## // does not support transactions
##ULONG _ulMiniSectorCutoff; // [38H,04] maximum size for a mini stream;
## // typically 4096 bytes
##SECT _sectMiniFatStart; // [3CH,04] first SECT in the MiniFAT chain
##FSINDEX _csectMiniFat; // [40H,04] number of SECTs in the MiniFAT chain
##SECT _sectDifStart; // [44H,04] first SECT in the DIFAT chain
##FSINDEX _csectDif; // [48H,04] number of SECTs in the DIFAT chain
##SECT _sectFat[109]; // [4CH,436] the SECTs of first 109 FAT sectors
##};
# [PL] header decoding:
# '<' indicates little-endian byte ordering for Intel (cf. struct module help)
fmt_header = '<8s16sHHHHHHLLLLLLLLLL'
header_size = struct.calcsize(fmt_header)
log.debug( "fmt_header size = %d, +FAT = %d" % (header_size, header_size + 109*4) )
header1 = header[:header_size]
(
self.header_signature,
self.header_clsid,
self.minor_version,
self.dll_version,
self.byte_order,
self.sector_shift,
self.mini_sector_shift,
self.reserved1,
self.reserved2,
self.num_dir_sectors,
self.num_fat_sectors,
self.first_dir_sector,
self.transaction_signature_number,
self.mini_stream_cutoff_size,
self.first_mini_fat_sector,
self.num_mini_fat_sectors,
self.first_difat_sector,
self.num_difat_sectors
) = struct.unpack(fmt_header, header1)
log.debug( struct.unpack(fmt_header, header1))
if self.header_signature != MAGIC:
# OLE signature should always be present
self._raise_defect(DEFECT_FATAL, "incorrect OLE signature")
if self.header_clsid != bytearray(16):
# according to AAF specs, CLSID should always be zero
self._raise_defect(DEFECT_INCORRECT, "incorrect CLSID in OLE header")
log.debug( "Minor Version = %d" % self.minor_version )
# TODO: according to MS-CFB, minor version should be 0x003E
log.debug( "DLL Version = %d (expected: 3 or 4)" % self.dll_version )
if self.dll_version not in [3, 4]:
# version 3: usual format, 512 bytes per sector
# version 4: large format, 4K per sector
self._raise_defect(DEFECT_INCORRECT, "incorrect DllVersion in OLE header")
log.debug( "Byte Order = %X (expected: FFFE)" % self.byte_order )
if self.byte_order != 0xFFFE:
# For now only common little-endian documents are handled correctly
self._raise_defect(DEFECT_FATAL, "incorrect ByteOrder in OLE header")
# TODO: add big-endian support for documents created on Mac ?
# But according to [MS-CFB] ? v20140502, ByteOrder MUST be 0xFFFE.
self.sector_size = 2**self.sector_shift
log.debug( "Sector Size = %d bytes (expected: 512 or 4096)" % self.sector_size )
if self.sector_size not in [512, 4096]:
self._raise_defect(DEFECT_INCORRECT, "incorrect sector_size in OLE header")
if (self.dll_version==3 and self.sector_size!=512) \
or (self.dll_version==4 and self.sector_size!=4096):
self._raise_defect(DEFECT_INCORRECT, "sector_size does not match DllVersion in OLE header")
self.mini_sector_size = 2**self.mini_sector_shift
log.debug( "MiniFAT Sector Size = %d bytes (expected: 64)" % self.mini_sector_size )
if self.mini_sector_size not in [64]:
self._raise_defect(DEFECT_INCORRECT, "incorrect mini_sector_size in OLE header")
if self.reserved1 != 0 or self.reserved2 != 0:
self._raise_defect(DEFECT_INCORRECT, "incorrect OLE header (non-null reserved bytes)")
log.debug( "Number of Directory sectors = %d" % self.num_dir_sectors )
# Number of directory sectors (only allowed if DllVersion != 3)
if self.sector_size==512 and self.num_dir_sectors!=0:
self._raise_defect(DEFECT_INCORRECT, "incorrect number of directory sectors in OLE header")
log.debug( "Number of FAT sectors = %d" % self.num_fat_sectors )
# num_fat_sectors = number of FAT sectors in the file
log.debug( "First Directory sector = %Xh" % self.first_dir_sector )
# first_dir_sector = 1st sector containing the directory
log.debug( "Transaction Signature Number = %d" % self.transaction_signature_number )
# Signature should be zero, BUT some implementations do not follow this
# rule => only a potential defect:
# (according to MS-CFB, may be != 0 for applications supporting file
# transactions)
if self.transaction_signature_number != 0:
self._raise_defect(DEFECT_POTENTIAL, "incorrect OLE header (transaction_signature_number>0)")
log.debug( "Mini Stream cutoff size = %Xh (expected: 1000h)" % self.mini_stream_cutoff_size )
# MS-CFB: This integer field MUST be set to 0x00001000. This field
# specifies the maximum size of a user-defined data stream allocated
# from the mini FAT and mini stream, and that cutoff is 4096 bytes.
# Any user-defined data stream larger than or equal to this cutoff size
# must be allocated as normal sectors from the FAT.
if self.mini_stream_cutoff_size != 0x1000:
self._raise_defect(DEFECT_INCORRECT, "incorrect mini_stream_cutoff_size in OLE header")
# if no exception is raised, the cutoff size is fixed to 0x1000
log.warning('Fixing the mini_stream_cutoff_size to 4096 (mandatory value) instead of %d' %
self.mini_stream_cutoff_size)
self.mini_stream_cutoff_size = 0x1000
# TODO: check if these values are OK
log.debug( "First MiniFAT sector = %Xh" % self.first_mini_fat_sector )
log.debug( "Number of MiniFAT sectors = %d" % self.num_mini_fat_sectors )
log.debug( "First DIFAT sector = %Xh" % self.first_difat_sector )
log.debug( "Number of DIFAT sectors = %d" % self.num_difat_sectors )
# calculate the number of sectors in the file
# (-1 because header doesn't count)
self.nb_sect = ( (filesize + self.sector_size-1) // self.sector_size) - 1
log.debug( "Maximum number of sectors in the file: %d (%Xh)" % (self.nb_sect, self.nb_sect))
#TODO: change this test, because an OLE file MAY contain other data
# after the last sector.
# file clsid
self.header_clsid = _clsid(header[8:24])
#TODO: remove redundant attributes, and fix the code which uses them?
self.sectorsize = self.sector_size #1 << i16(header, 30)
self.minisectorsize = self.mini_sector_size #1 << i16(header, 32)
self.minisectorcutoff = self.mini_stream_cutoff_size # i32(header, 56)
# check known streams for duplicate references (these are always in FAT,
# never in MiniFAT):
self._check_duplicate_stream(self.first_dir_sector)
# check MiniFAT only if it is not empty:
if self.num_mini_fat_sectors:
self._check_duplicate_stream(self.first_mini_fat_sector)
# check DIFAT only if it is not empty:
if self.num_difat_sectors:
self._check_duplicate_stream(self.first_difat_sector)
# Load file allocation tables
self.loadfat(header)
# Load directory. This sets both the direntries list (ordered by sid)
# and the root (ordered by hierarchy) members.
self.loaddirectory(self.first_dir_sector)
self.ministream = None
self.minifatsect = self.first_mini_fat_sector
def close(self):
"""
close the OLE file, to release the file object
"""
self.fp.close()
def _check_duplicate_stream(self, first_sect, minifat=False):
"""
Checks if a stream has not been already referenced elsewhere.
This method should only be called once for each known stream, and only
if stream size is not null.
:param first_sect: int, index of first sector of the stream in FAT
:param minifat: bool, if True, stream is located in the MiniFAT, else in the FAT
"""
if minifat:
log.debug('_check_duplicate_stream: sect=%Xh in MiniFAT' % first_sect)
used_streams = self._used_streams_minifat
else:
log.debug('_check_duplicate_stream: sect=%Xh in FAT' % first_sect)
# some values can be safely ignored (not a real stream):
if first_sect in (DIFSECT,FATSECT,ENDOFCHAIN,FREESECT):
return
used_streams = self._used_streams_fat
#TODO: would it be more efficient using a dict or hash values, instead
# of a list of long ?
if first_sect in used_streams:
self._raise_defect(DEFECT_INCORRECT, 'Stream referenced twice')
else:
used_streams.append(first_sect)
def dumpfat(self, fat, firstindex=0):
"""
Display a part of FAT in human-readable form for debugging purposes
"""
# dictionary to convert special FAT values into human-readable strings
VPL = 8 # values per line (8+1 * 8+1 = 81)
fatnames = {
FREESECT: "..free..",
ENDOFCHAIN: "[ END. ]",
FATSECT: "FATSECT ",
DIFSECT: "DIFSECT "
}
nbsect = len(fat)
nlines = (nbsect+VPL-1)//VPL
print("index", end=" ")
for i in range(VPL):
print("%8X" % i, end=" ")
print()
for l in range(nlines):
index = l*VPL
print("%6X:" % (firstindex+index), end=" ")
for i in range(index, index+VPL):
if i>=nbsect:
break
sect = fat[i]
aux = sect & 0xFFFFFFFF # JYTHON-WORKAROUND
if aux in fatnames:
name = fatnames[aux]
else:
if sect == i+1:
name = " --->"
else:
name = "%8X" % sect
print(name, end=" ")
print()
def dumpsect(self, sector, firstindex=0):
"""
Display a sector in a human-readable form, for debugging purposes
"""
VPL=8 # number of values per line (8+1 * 8+1 = 81)
tab = array.array(UINT32, sector)
if sys.byteorder == 'big':
tab.byteswap()
nbsect = len(tab)
nlines = (nbsect+VPL-1)//VPL
print("index", end=" ")
for i in range(VPL):
print("%8X" % i, end=" ")
print()
for l in range(nlines):
index = l*VPL
print("%6X:" % (firstindex+index), end=" ")
for i in range(index, index+VPL):
if i>=nbsect:
break
sect = tab[i]
name = "%8X" % sect
print(name, end=" ")
print()
def sect2array(self, sect):
"""
convert a sector to an array of 32 bits unsigned integers,
swapping bytes on big endian CPUs such as PowerPC (old Macs)
"""
a = array.array(UINT32, sect)
# if CPU is big endian, swap bytes:
if sys.byteorder == 'big':
a.byteswap()
return a
def loadfat_sect(self, sect):
"""
Adds the indexes of the given sector to the FAT
:param sect: string containing the first FAT sector, or array of long integers
:returns: index of last FAT sector.
"""
# a FAT sector is an array of ulong integers.
if isinstance(sect, array.array):
# if sect is already an array it is directly used
fat1 = sect
else:
# if it's a raw sector, it is parsed in an array
fat1 = self.sect2array(sect)
# Display the sector contents only if the logging level is debug:
if log.isEnabledFor(logging.DEBUG):
self.dumpsect(sect)
# The FAT is a sector chain starting at the first index of itself.
# initialize isect, just in case:
isect = None
for isect in fat1:
isect = isect & 0xFFFFFFFF # JYTHON-WORKAROUND
log.debug("isect = %X" % isect)
if isect == ENDOFCHAIN or isect == FREESECT:
# the end of the sector chain has been reached
log.debug("found end of sector chain")
break
# read the FAT sector
s = self.getsect(isect)
# parse it as an array of 32 bits integers, and add it to the
# global FAT array
nextfat = self.sect2array(s)
self.fat = self.fat + nextfat
return isect
def loadfat(self, header):
"""
Load the FAT table.
"""
# The 1st sector of the file contains sector numbers for the first 109
# FAT sectors, right after the header which is 76 bytes long.
# (always 109, whatever the sector size: 512 bytes = 76+4*109)
# Additional sectors are described by DIF blocks
log.debug('Loading the FAT table, starting with the 1st sector after the header')
sect = header[76:512]
log.debug( "len(sect)=%d, so %d integers" % (len(sect), len(sect)//4) )
#fat = []
# [PL] FAT is an array of 32 bits unsigned ints, it's more effective
# to use an array than a list in Python.
# It's initialized as empty first:
self.fat = array.array(UINT32)
self.loadfat_sect(sect)
#self.dumpfat(self.fat)
## for i in range(0, len(sect), 4):
## ix = i32(sect, i)
## #[PL] if ix == -2 or ix == -1: # ix == 0xFFFFFFFE or ix == 0xFFFFFFFF:
## if ix == 0xFFFFFFFE or ix == 0xFFFFFFFF:
## break
## s = self.getsect(ix)
## #fat = fat + [i32(s, i) for i in range(0, len(s), 4)]
## fat = fat + array.array(UINT32, s)
if self.num_difat_sectors != 0:
log.debug('DIFAT is used, because file size > 6.8MB.')
# [PL] There's a DIFAT because file is larger than 6.8MB
# some checks just in case:
if self.num_fat_sectors <= 109:
# there must be at least 109 blocks in header and the rest in
# DIFAT, so number of sectors must be >109.
self._raise_defect(DEFECT_INCORRECT, 'incorrect DIFAT, not enough sectors')
if self.first_difat_sector >= self.nb_sect:
# initial DIFAT block index must be valid
self._raise_defect(DEFECT_FATAL, 'incorrect DIFAT, first index out of range')
log.debug( "DIFAT analysis..." )
# We compute the necessary number of DIFAT sectors :
# Number of pointers per DIFAT sector = (sectorsize/4)-1
# (-1 because the last pointer is the next DIFAT sector number)
nb_difat_sectors = (self.sectorsize//4)-1
# (if 512 bytes: each DIFAT sector = 127 pointers + 1 towards next DIFAT sector)
nb_difat = (self.num_fat_sectors-109 + nb_difat_sectors-1)//nb_difat_sectors
log.debug( "nb_difat = %d" % nb_difat )
if self.num_difat_sectors != nb_difat:
raise IOError('incorrect DIFAT')
isect_difat = self.first_difat_sector
for i in iterrange(nb_difat):
log.debug( "DIFAT block %d, sector %X" % (i, isect_difat) )
#TODO: check if corresponding FAT SID = DIFSECT
sector_difat = self.getsect(isect_difat)
difat = self.sect2array(sector_difat)
# Display the sector contents only if the logging level is debug:
if log.isEnabledFor(logging.DEBUG):
self.dumpsect(sector_difat)
self.loadfat_sect(difat[:nb_difat_sectors])
# last DIFAT pointer is next DIFAT sector:
isect_difat = difat[nb_difat_sectors]
log.debug( "next DIFAT sector: %X" % isect_difat )
# checks:
if isect_difat not in [ENDOFCHAIN, FREESECT]:
# last DIFAT pointer value must be ENDOFCHAIN or FREESECT
raise IOError('incorrect end of DIFAT')
## if len(self.fat) != self.num_fat_sectors:
## # FAT should contain num_fat_sectors blocks
## print("FAT length: %d instead of %d" % (len(self.fat), self.num_fat_sectors))
## raise IOError('incorrect DIFAT')
else:
log.debug('No DIFAT, because file size < 6.8MB.')
# since FAT is read from fixed-size sectors, it may contain more values
# than the actual number of sectors in the file.
# Keep only the relevant sector indexes:
if len(self.fat) > self.nb_sect:
log.debug('len(fat)=%d, shrunk to nb_sect=%d' % (len(self.fat), self.nb_sect))
self.fat = self.fat[:self.nb_sect]
log.debug('FAT references %d sectors / Maximum %d sectors in file' % (len(self.fat), self.nb_sect))
# Display the FAT contents only if the logging level is debug:
if log.isEnabledFor(logging.DEBUG):
log.debug('\nFAT:')
self.dumpfat(self.fat)
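# Hedged worked example of the DIFAT sizing arithmetic above, for 512-byte sectors
# (the numbers are illustrative, not from a real file):
#
#     sectorsize = 512
#     num_fat_sectors = 200                       # FAT sectors declared in the header
#     nb_difat_sectors = (sectorsize // 4) - 1    # 127 pointers per DIFAT sector
#     nb_difat = (num_fat_sectors - 109 + nb_difat_sectors - 1) // nb_difat_sectors
#     # nb_difat == 1: the header holds the first 109 FAT sector indexes,
#     # and a single DIFAT sector is enough for the remaining 91.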
def loadminifat(self):
"""
Load the MiniFAT table.
"""
# MiniFAT is stored in a standard sub-stream, pointed to by a header
# field.
# NOTE: there are two sizes to take into account for this stream:
# 1) Stream size is calculated according to the number of sectors
# declared in the OLE header. This allocated stream may be more than
# needed to store the actual sector indexes.
# (self.num_mini_fat_sectors is the number of sectors of size self.sector_size)
stream_size = self.num_mini_fat_sectors * self.sector_size
# 2) Actually used size is calculated by dividing the MiniStream size
# (given by root entry size) by the size of mini sectors, *4 for
# 32 bits indexes:
nb_minisectors = (self.root.size + self.mini_sector_size-1) // self.mini_sector_size
used_size = nb_minisectors * 4
log.debug('loadminifat(): minifatsect=%d, nb FAT sectors=%d, used_size=%d, stream_size=%d, nb MiniSectors=%d' %
(self.minifatsect, self.num_mini_fat_sectors, used_size, stream_size, nb_minisectors))
if used_size > stream_size:
# This is not really a problem, but may indicate a wrong implementation:
self._raise_defect(DEFECT_INCORRECT, 'OLE MiniStream is larger than MiniFAT')
# In any case, first read stream_size:
s = self._open(self.minifatsect, stream_size, force_FAT=True).read()
#[PL] Old code replaced by an array:
#self.minifat = [i32(s, i) for i in range(0, len(s), 4)]
self.minifat = self.sect2array(s)
# Then shrink the array to used size, to avoid indexes out of MiniStream:
log.debug('MiniFAT shrunk from %d to %d sectors' % (len(self.minifat), nb_minisectors))
self.minifat = self.minifat[:nb_minisectors]
log.debug('loadminifat(): len=%d' % len(self.minifat))
# Display the FAT contents only if the logging level is debug:
if log.isEnabledFor(logging.DEBUG):
log.debug('\nMiniFAT:')
self.dumpfat(self.minifat)
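# Hedged worked example of the two sizes compared above (illustrative numbers):
#
#     num_mini_fat_sectors = 1                    # from the OLE header
#     sector_size, mini_sector_size = 512, 64
#     root_size = 3000                            # size of the MiniStream (root entry)
#     stream_size = num_mini_fat_sectors * sector_size                          # 512 bytes allocated
#     nb_minisectors = (root_size + mini_sector_size - 1) // mini_sector_size   # 47 mini sectors
#     used_size = nb_minisectors * 4                                            # 188 bytes actually used
#     # used_size <= stream_size here, so no defect is raised.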
def getsect(self, sect):
"""
Read given sector from file on disk.
:param sect: int, sector index
:returns: a string containing the sector data.
"""
# From [MS-CFB]: A sector number can be converted into a byte offset
# into the file by using the following formula:
# (sector number + 1) x Sector Size.
# This implies that sector #0 of the file begins at byte offset Sector
# Size, not at 0.
# [PL] the original code in PIL was wrong when sectors are 4KB instead of
# 512 bytes:
#self.fp.seek(512 + self.sectorsize * sect)
#[PL]: added safety checks:
#print("getsect(%X)" % sect)
try:
self.fp.seek(self.sectorsize * (sect+1))
except:
log.debug('getsect(): sect=%X, seek=%d, filesize=%d' %
(sect, self.sectorsize*(sect+1), self._filesize))
self._raise_defect(DEFECT_FATAL, 'OLE sector index out of range')
sector = self.fp.read(self.sectorsize)
if len(sector) != self.sectorsize:
log.debug('getsect(): sect=%X, read=%d, sectorsize=%d' %
(sect, len(sector), self.sectorsize))
self._raise_defect(DEFECT_FATAL, 'incomplete OLE sector')
return sector
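# Hedged worked example of the offset formula above:
#
#     offset = 512 * (0 + 1)       # sector #0 of a 512-byte-sector file starts at byte 512
#     offset = 4096 * (3 + 1)      # sector #3 of a 4K-sector file (DllVersion 4) starts at 16384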
def write_sect(self, sect, data, padding=b'\x00'):
"""
Write given sector to file on disk.
:param sect: int, sector index
:param data: bytes, sector data
:param padding: single byte, padding character if data < sector size
"""
if not isinstance(data, bytes):
raise TypeError("write_sect: data must be a bytes string")
if not isinstance(padding, bytes) or len(padding)!=1:
raise TypeError("write_sect: padding must be a bytes string of 1 char")
#TODO: we could allow padding=None for no padding at all
try:
self.fp.seek(self.sectorsize * (sect+1))
except:
log.debug('write_sect(): sect=%X, seek=%d, filesize=%d' %
(sect, self.sectorsize*(sect+1), self._filesize))
self._raise_defect(DEFECT_FATAL, 'OLE sector index out of range')
if len(data) < self.sectorsize:
# add padding
data += padding * (self.sectorsize - len(data))
elif len(data) > self.sectorsize:
raise ValueError("Data is larger than sector size")
self.fp.write(data)
def loaddirectory(self, sect):
"""
Load the directory.
:param sect: sector index of directory stream.
"""
log.debug('Loading the Directory:')
# The directory is stored in a standard
# substream, independent of its size.
# open directory stream as a read-only file:
# (stream size is not known in advance)
self.directory_fp = self._open(sect)
#[PL] to detect malformed documents and avoid DoS attacks, the maximum
# number of directory entries can be calculated:
max_entries = self.directory_fp.size // 128
log.debug('loaddirectory: size=%d, max_entries=%d' %
(self.directory_fp.size, max_entries))
# Create list of directory entries
#self.direntries = []
# We start with a list of "None" object
self.direntries = [None] * max_entries
## for sid in iterrange(max_entries):
## entry = fp.read(128)
## if not entry:
## break
## self.direntries.append(OleDirectoryEntry(entry, sid, self))
# load root entry:
root_entry = self._load_direntry(0)
# Root entry is the first entry:
self.root = self.direntries[0]
# TODO: read ALL directory entries (ignore bad entries?)
# TODO: adapt build_storage_tree to avoid duplicate reads
# for i in range(1, max_entries):
# self._load_direntry(i)
# read and build all storage trees, starting from the root:
self.root.build_storage_tree()
def _load_direntry (self, sid):
"""
Load a directory entry from the directory.
This method should only be called once for each storage/stream when
loading the directory.
:param sid: index of storage/stream in the directory.
:returns: a OleDirectoryEntry object
:exception IOError: if the entry has already been referenced.
"""
# check if SID is OK:
if sid<0 or sid>=len(self.direntries):
self._raise_defect(DEFECT_FATAL, "OLE directory index out of range")
# check if entry was already referenced:
if self.direntries[sid] is not None:
self._raise_defect(DEFECT_INCORRECT,
"double reference for OLE stream/storage")
# if exception not raised, return the object
return self.direntries[sid]
self.directory_fp.seek(sid * 128)
entry = self.directory_fp.read(128)
self.direntries[sid] = OleDirectoryEntry(entry, sid, self)
return self.direntries[sid]
def dumpdirectory(self):
"""
Dump directory (for debugging only)
"""
self.root.dump()
def _open(self, start, size = UNKNOWN_SIZE, force_FAT=False):
"""
Open a stream, either in FAT or MiniFAT according to its size.
(openstream helper)
:param start: index of first sector
:param size: size of stream (or nothing if size is unknown)
:param force_FAT: if False (default), stream will be opened in FAT or MiniFAT
according to size. If True, it will always be opened in FAT.
"""
log.debug('OleFileIO._open(): sect=%Xh, size=%d, force_FAT=%s' %
(start, size, str(force_FAT)))
# stream size is compared to the mini_stream_cutoff_size threshold:
if size < self.minisectorcutoff and not force_FAT:
# ministream object
if not self.ministream:
# load MiniFAT if it wasn't already done:
self.loadminifat()
# The first sector index of the miniFAT stream is stored in the
# root directory entry:
size_ministream = self.root.size
log.debug('Opening MiniStream: sect=%Xh, size=%d' %
(self.root.isectStart, size_ministream))
self.ministream = self._open(self.root.isectStart,
size_ministream, force_FAT=True)
return OleStream(fp=self.ministream, sect=start, size=size,
offset=0, sectorsize=self.minisectorsize,
fat=self.minifat, filesize=self.ministream.size,
olefileio=self)
else:
# standard stream
return OleStream(fp=self.fp, sect=start, size=size,
offset=self.sectorsize,
sectorsize=self.sectorsize, fat=self.fat,
filesize=self._filesize,
olefileio=self)
def _list(self, files, prefix, node, streams=True, storages=False):
"""
listdir helper
:param files: list of files to fill in
:param prefix: current location in storage tree (list of names)
:param node: current node (OleDirectoryEntry object)
:param streams: bool, include streams if True (True by default) - new in v0.26
:param storages: bool, include storages if True (False by default) - new in v0.26
(note: the root storage is never included)
"""
prefix = prefix + [node.name]
for entry in node.kids:
if entry.entry_type == STGTY_STORAGE:
# this is a storage
if storages:
# add it to the list
files.append(prefix[1:] + [entry.name])
# check its kids
self._list(files, prefix, entry, streams, storages)
elif entry.entry_type == STGTY_STREAM:
# this is a stream
if streams:
# add it to the list
files.append(prefix[1:] + [entry.name])
else:
self._raise_defect(DEFECT_INCORRECT, 'The directory tree contains an entry which is neither a stream nor a storage.')
def listdir(self, streams=True, storages=False):
"""
Return a list of streams and/or storages stored in this file
:param streams: bool, include streams if True (True by default) - new in v0.26
:param storages: bool, include storages if True (False by default) - new in v0.26
(note: the root storage is never included)
:returns: list of stream and/or storage paths
"""
files = []
self._list(files, [], self.root, streams, storages)
return files
def _find(self, filename):
"""
Returns directory entry of given filename. (openstream helper)
Note: this method is case-insensitive.
:param filename: path of stream in storage tree (except root entry), either:
- a string using Unix path syntax, for example:
'storage_1/storage_1.2/stream'
- or a list of storage filenames, path to the desired stream/storage.
Example: ['storage_1', 'storage_1.2', 'stream']
:returns: sid of requested filename
:exception IOError: if file not found
"""
# if filename is a string instead of a list, split it on slashes to
# convert to a list:
if isinstance(filename, basestring):
filename = filename.split('/')
# walk across storage tree, following given path:
node = self.root
for name in filename:
for kid in node.kids:
if kid.name.lower() == name.lower():
break
else:
raise IOError("file not found")
node = kid
return node.sid
def openstream(self, filename):
"""
Open a stream as a read-only file object (BytesIO).
Note: filename is case-insensitive.
:param filename: path of stream in storage tree (except root entry), either:
- a string using Unix path syntax, for example:
'storage_1/storage_1.2/stream'
- or a list of storage filenames, path to the desired stream/storage.
Example: ['storage_1', 'storage_1.2', 'stream']
:returns: file object (read-only)
:exception IOError: if filename not found, or if this is not a stream.
"""
sid = self._find(filename)
entry = self.direntries[sid]
if entry.entry_type != STGTY_STREAM:
raise IOError("this file is not a stream")
return self._open(entry.isectStart, entry.size)
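# Hedged usage sketch for listdir()/openstream() ('example.xls' and the stream
# name 'Workbook' are hypothetical):
#
#     ole = OleFileIO('example.xls')
#     print(ole.listdir())                  # e.g. [['Workbook'], ['\x05SummaryInformation'], ...]
#     if ole.exists('Workbook'):
#         data = ole.openstream('Workbook').read()
#         print(len(data), 'bytes')
#     ole.close()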
def write_stream(self, stream_name, data):
"""
Write a stream to disk. For now, it is only possible to replace an
existing stream by data of the same size.
:param stream_name: path of stream in storage tree (except root entry), either:
- a string using Unix path syntax, for example:
'storage_1/storage_1.2/stream'
- or a list of storage filenames, path to the desired stream/storage.
Example: ['storage_1', 'storage_1.2', 'stream']
:param data: bytes, data to be written, must be the same size as the original
stream.
"""
if not isinstance(data, bytes):
raise TypeError("write_stream: data must be a bytes string")
sid = self._find(stream_name)
entry = self.direntries[sid]
if entry.entry_type != STGTY_STREAM:
raise IOError("this is not a stream")
size = entry.size
if size != len(data):
raise ValueError("write_stream: data must be the same size as the existing stream")
if size < self.minisectorcutoff:
raise NotImplementedError("Writing a stream in MiniFAT is not implemented yet")
sect = entry.isectStart
# number of sectors to write
nb_sectors = (size + (self.sectorsize-1)) // self.sectorsize
log.debug('nb_sectors = %d' % nb_sectors)
for i in range(nb_sectors):
## try:
## self.fp.seek(offset + self.sectorsize * sect)
## except:
## log.debug('sect=%d, seek=%d' %
## (sect, offset+self.sectorsize*sect))
## raise IOError('OLE sector index out of range')
# extract one sector from data, the last one being smaller:
if i<(nb_sectors-1):
data_sector = data [i*self.sectorsize : (i+1)*self.sectorsize]
#TODO: comment this if it works
assert(len(data_sector)==self.sectorsize)
else:
data_sector = data [i*self.sectorsize:]
#TODO: comment this if it works
log.debug('write_stream: size=%d sectorsize=%d data_sector=%Xh size%%sectorsize=%d'
% (size, self.sectorsize, len(data_sector), size % self.sectorsize))
assert(len(data_sector) % self.sectorsize==size % self.sectorsize)
self.write_sect(sect, data_sector)
## self.fp.write(data_sector)
# jump to next sector in the FAT:
try:
sect = self.fat[sect]
except IndexError:
# [PL] if pointer is out of the FAT an exception is raised
raise IOError('incorrect OLE FAT, sector index out of range')
#[PL] Last sector should be a "end of chain" marker:
if sect != ENDOFCHAIN:
raise IOError('incorrect last sector index in OLE stream')
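# Hedged usage sketch for write_stream(): replace an existing stream with data of
# exactly the same size (file and stream names are hypothetical):
#
#     ole = OleFileIO('example.doc', write_mode=True)
#     size = ole.get_size('WordDocument')
#     ole.write_stream('WordDocument', b'\x00' * size)   # must match the original size
#     ole.close()
#
# Streams smaller than the MiniFAT cutoff (4096 bytes) cannot be rewritten yet.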
def get_type(self, filename):
"""
Test if given filename exists as a stream or a storage in the OLE
container, and return its type.
:param filename: path of stream in storage tree. (see openstream for syntax)
:returns: False if object does not exist, its entry type (>0) otherwise:
- STGTY_STREAM: a stream
- STGTY_STORAGE: a storage
- STGTY_ROOT: the root entry
"""
try:
sid = self._find(filename)
entry = self.direntries[sid]
return entry.entry_type
except:
return False
def getclsid(self, filename):
"""
Return clsid of a stream/storage.
:param filename: path of stream/storage in storage tree. (see openstream for
syntax)
:returns: Empty string if clsid is null, a printable representation of the clsid otherwise
new in version 0.44
"""
sid = self._find(filename)
entry = self.direntries[sid]
return entry.clsid
def getmtime(self, filename):
"""
Return modification time of a stream/storage.
:param filename: path of stream/storage in storage tree. (see openstream for
syntax)
:returns: None if modification time is null, a python datetime object
otherwise (UTC timezone)
new in version 0.26
"""
sid = self._find(filename)
entry = self.direntries[sid]
return entry.getmtime()
def getctime(self, filename):
"""
Return creation time of a stream/storage.
:param filename: path of stream/storage in storage tree. (see openstream for
syntax)
:returns: None if creation time is null, a python datetime object
otherwise (UTC timezone)
new in version 0.26
"""
sid = self._find(filename)
entry = self.direntries[sid]
return entry.getctime()
def exists(self, filename):
"""
Test if given filename exists as a stream or a storage in the OLE
container.
Note: filename is case-insensitive.
:param filename: path of stream in storage tree. (see openstream for syntax)
:returns: True if the object exists, else False.
"""
try:
sid = self._find(filename)
return True
except:
return False
def get_size(self, filename):
"""
Return size of a stream in the OLE container, in bytes.
:param filename: path of stream in storage tree (see openstream for syntax)
:returns: size in bytes (long integer)
:exception IOError: if file not found
:exception TypeError: if this is not a stream.
"""
sid = self._find(filename)
entry = self.direntries[sid]
if entry.entry_type != STGTY_STREAM:
#TODO: Should it return zero instead of raising an exception ?
raise TypeError('object is not an OLE stream')
return entry.size
def get_rootentry_name(self):
"""
Return root entry name. Should usually be 'Root Entry' or 'R' in most
implementations.
"""
return self.root.name
def getproperties(self, filename, convert_time=False, no_conversion=None):
"""
Return properties described in substream.
:param filename: path of stream in storage tree (see openstream for syntax)
:param convert_time: bool, if True timestamps will be converted to Python datetime
:param no_conversion: None or list of int, timestamps not to be converted
(for example total editing time is not a real timestamp)
:returns: a dictionary of values indexed by id (integer)
"""
#REFERENCE: [MS-OLEPS] https://msdn.microsoft.com/en-us/library/dd942421.aspx
# make sure no_conversion is a list, just to simplify code below:
if no_conversion is None:
no_conversion = []
# stream path as a string to report exceptions:
streampath = filename
if not isinstance(streampath, str):
streampath = '/'.join(streampath)
fp = self.openstream(filename)
data = {}
try:
# header
s = fp.read(28)
clsid = _clsid(s[8:24])
# format id
s = fp.read(20)
fmtid = _clsid(s[:16])
fp.seek(i32(s, 16))
# get section
s = b"****" + fp.read(i32(fp.read(4))-4)
# number of properties:
num_props = i32(s, 4)
except BaseException as exc:
# catch exception while parsing property header, and only raise
# a DEFECT_INCORRECT then return an empty dict, because this is not
# a fatal error when parsing the whole file
msg = 'Error while parsing properties header in stream %s: %s' % (
repr(streampath), exc)
self._raise_defect(DEFECT_INCORRECT, msg, type(exc))
return data
for i in range(num_props):
property_id = 0 # just in case of an exception
try:
property_id = i32(s, 8+i*8)
offset = i32(s, 12+i*8)
property_type = i32(s, offset)
log.debug('property id=%d: type=%d offset=%X' % (property_id, property_type, offset))
# test for common types first (should perhaps use
# a dictionary instead?)
if property_type == VT_I2: # 16-bit signed integer
value = i16(s, offset+4)
if value >= 32768:
value = value - 65536
elif property_type == VT_UI2: # 2-byte unsigned integer
value = i16(s, offset+4)
elif property_type in (VT_I4, VT_INT, VT_ERROR):
# VT_I4: 32-bit signed integer
# VT_ERROR: HRESULT, similar to 32-bit signed integer,
# see https://msdn.microsoft.com/en-us/library/cc230330.aspx
value = i32(s, offset+4)
elif property_type in (VT_UI4, VT_UINT): # 4-byte unsigned integer
value = i32(s, offset+4) # FIXME
elif property_type in (VT_BSTR, VT_LPSTR):
# CodePageString, see https://msdn.microsoft.com/en-us/library/dd942354.aspx
# size is a 32 bits integer, including the null terminator, and
# possibly trailing or embedded null chars
#TODO: if codepage is unicode, the string should be converted as such
count = i32(s, offset+4)
value = s[offset+8:offset+8+count-1]
# remove all null chars:
value = value.replace(b'\x00', b'')
elif property_type == VT_BLOB:
# binary large object (BLOB)
# see https://msdn.microsoft.com/en-us/library/dd942282.aspx
count = i32(s, offset+4)
value = s[offset+8:offset+8+count]
elif property_type == VT_LPWSTR:
# UnicodeString
# see https://msdn.microsoft.com/en-us/library/dd942313.aspx
# "the string should NOT contain embedded or additional trailing
# null characters."
count = i32(s, offset+4)
value = self._decode_utf16_str(s[offset+8:offset+8+count*2])
elif property_type == VT_FILETIME:
value = long(i32(s, offset+4)) + (long(i32(s, offset+8))<<32)
# FILETIME is a 64-bit int: "number of 100ns periods
# since Jan 1,1601".
if convert_time and property_id not in no_conversion:
log.debug('Converting property #%d to python datetime, value=%d=%fs'
%(property_id, value, float(value)/10000000))
# convert FILETIME to Python datetime.datetime
# inspired from https://code.activestate.com/recipes/511425-filetime-to-datetime/
_FILETIME_null_date = datetime.datetime(1601, 1, 1, 0, 0, 0)
log.debug('timedelta days=%d' % (value//(10*1000000*3600*24)))
value = _FILETIME_null_date + datetime.timedelta(microseconds=value//10)
else:
# legacy code kept for backward compatibility: returns a
# number of seconds since Jan 1,1601
value = value // 10000000 # seconds
elif property_type == VT_UI1: # 1-byte unsigned integer
value = i8(s[offset+4])
elif property_type == VT_CLSID:
value = _clsid(s[offset+4:offset+20])
elif property_type == VT_CF:
# PropertyIdentifier or ClipboardData??
# see https://msdn.microsoft.com/en-us/library/dd941945.aspx
count = i32(s, offset+4)
value = s[offset+8:offset+8+count]
elif property_type == VT_BOOL:
# VARIANT_BOOL, 16 bits bool, 0x0000=False, 0xFFFF=True
# see https://msdn.microsoft.com/en-us/library/cc237864.aspx
value = bool(i16(s, offset+4))
else:
value = None # everything else yields "None"
log.debug('property id=%d: type=%d not implemented in parser yet' % (property_id, property_type))
# missing: VT_EMPTY, VT_NULL, VT_R4, VT_R8, VT_CY, VT_DATE,
# VT_DECIMAL, VT_I1, VT_I8, VT_UI8,
# see https://msdn.microsoft.com/en-us/library/dd942033.aspx
# FIXME: add support for VT_VECTOR
# VT_VECTOR is a 32 uint giving the number of items, followed by
# the items in sequence. The VT_VECTOR value is combined with the
# type of items, e.g. VT_VECTOR|VT_BSTR
# see https://msdn.microsoft.com/en-us/library/dd942011.aspx
#print("%08x" % property_id, repr(value), end=" ")
#print("(%s)" % VT[i32(s, offset) & 0xFFF])
data[property_id] = value
except BaseException as exc:
# catch exception while parsing each property, and only raise
# a DEFECT_INCORRECT, because parsing can go on
msg = 'Error while parsing property id %d in stream %s: %s' % (
property_id, repr(streampath), exc)
self._raise_defect(DEFECT_INCORRECT, msg, type(exc))
return data
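# Hedged usage sketch for getproperties(); per [MS-OLEPS], ids 2/3/4 of the
# SummaryInformation stream are title/subject/author ('report.doc' is hypothetical):
#
#     ole = OleFileIO('report.doc')
#     props = ole.getproperties('\x05SummaryInformation',
#                               convert_time=True, no_conversion=[10])
#     print(props.get(2), props.get(4))     # title and author, or None if absent
#     ole.close()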
def get_metadata(self):
"""
Parse standard properties streams, return an OleMetadata object
containing all the available metadata.
(also stored in the metadata attribute of the OleFileIO object)
new in version 0.25
"""
self.metadata = OleMetadata()
self.metadata.parse_properties(self)
return self.metadata
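    # Illustrative usage sketch (not part of the original source; the file name
    # is hypothetical):
    #
    #   ole = OleFileIO('example.doc')
    #   meta = ole.get_metadata()
    #   print(meta.author, meta.create_time)
    #   meta.dump()
    #
    # Attributes such as `author` come from the SummaryInformation stream
    # parsed by OleMetadata.parse_properties().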
#
# --------------------------------------------------------------------
# This script can be used to dump the directory of any OLE2 structured
# storage file.
if __name__ == "__main__":
import sys, optparse
DEFAULT_LOG_LEVEL = "warning" # Default log level
LOG_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
usage = 'usage: %prog [options] <filename> [filename2 ...]'
parser = optparse.OptionParser(usage=usage)
parser.add_option("-c", action="store_true", dest="check_streams",
help='check all streams (for debugging purposes)')
parser.add_option("-d", action="store_true", dest="debug_mode",
help='debug mode, shortcut for -l debug (displays a lot of debug information, for developers only)')
parser.add_option('-l', '--loglevel', dest="loglevel", action="store", default=DEFAULT_LOG_LEVEL,
help="logging level debug/info/warning/error/critical (default=%default)")
(options, args) = parser.parse_args()
print('olefile version %s %s - https://www.decalage.info/en/olefile\n' % (__version__, __date__))
# Print help if no arguments are passed
if len(args) == 0:
print(__doc__)
parser.print_help()
sys.exit()
if options.debug_mode:
options.loglevel = 'debug'
# setup logging to the console
logging.basicConfig(level=LOG_LEVELS[options.loglevel], format='%(levelname)-8s %(message)s')
# also enable the module's logger:
enable_logging()
for filename in args:
try:
ole = OleFileIO(filename)#, raise_defects=DEFECT_INCORRECT)
print("-" * 68)
print(filename)
print("-" * 68)
ole.dumpdirectory()
for streamname in ole.listdir():
if streamname[-1][0] == "\005":
print("%r: properties" % streamname)
try:
props = ole.getproperties(streamname, convert_time=True)
props = sorted(props.items())
for k, v in props:
                            #[PL]: avoid displaying overly large or binary values:
if isinstance(v, (basestring, bytes)):
if len(v) > 50:
v = v[:50]
if isinstance(v, bytes):
# quick and dirty binary check:
for c in (1,2,3,4,5,6,7,11,12,14,15,16,17,18,19,20,
21,22,23,24,25,26,27,28,29,30,31):
if c in bytearray(v):
v = '(binary data)'
break
print(" ", k, v)
except:
log.exception('Error while parsing property stream %r' % streamname)
if options.check_streams:
# Read all streams to check if there are errors:
print('\nChecking streams...')
for streamname in ole.listdir():
# print name using repr() to convert binary chars to \xNN:
print('-', repr('/'.join(streamname)),'-', end=' ')
st_type = ole.get_type(streamname)
if st_type == STGTY_STREAM:
print('size %d' % ole.get_size(streamname))
# just try to read stream in memory:
ole.openstream(streamname)
else:
print('NOT a stream : type=%d' % st_type)
print()
## for streamname in ole.listdir():
## # print name using repr() to convert binary chars to \xNN:
## print('-', repr('/'.join(streamname)),'-', end=' ')
## print(ole.getmtime(streamname))
## print()
print('Modification/Creation times of all directory entries:')
for entry in ole.direntries:
if entry is not None:
print('- %s: mtime=%s ctime=%s' % (entry.name,
entry.getmtime(), entry.getctime()))
print()
# parse and display metadata:
try:
meta = ole.get_metadata()
meta.dump()
except:
log.exception('Error while parsing metadata')
print()
#[PL] Test a few new methods:
root = ole.get_rootentry_name()
print('Root entry name: "%s"' % root)
if ole.exists('worddocument'):
print("This is a Word document.")
print("type of stream 'WordDocument':", ole.get_type('worddocument'))
print("size :", ole.get_size('worddocument'))
if ole.exists('macros/vba'):
print("This document may contain VBA macros.")
# print parsing issues:
print('\nNon-fatal issues raised during parsing:')
if ole.parsing_issues:
for exctype, msg in ole.parsing_issues:
print('- %s: %s' % (exctype.__name__, msg))
else:
print('None')
except:
log.exception('Error while parsing file %r' % filename)
# this code was developed while listening to The Wedding Present "Sea Monsters"
| gpl-3.0 | -7,636,307,375,128,598,000 | 44.182922 | 129 | 0.594105 | false |
SurfasJones/djcmsrc3 | venv/lib/python2.7/site-packages/cms/templatetags/cms_tags.py | 3 | 41250 | # -*- coding: utf-8 -*-
from copy import copy
from itertools import chain
from datetime import datetime
from django.template.defaultfilters import safe
from classytags.arguments import Argument, MultiValueArgument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag, AsTag
from classytags.parser import Parser
from cms import __version__
from cms.exceptions import PlaceholderNotFound
from cms.models import Page, Placeholder as PlaceholderModel, CMSPlugin, StaticPlaceholder
from cms.plugin_pool import plugin_pool
from cms.plugin_rendering import render_placeholder
from cms.utils.plugins import get_plugins, assign_plugins
from cms.utils import get_language_from_request, get_cms_setting, get_site_id
from cms.utils.compat.type_checks import string_types, int_types
from cms.utils.i18n import force_language
from cms.utils.moderator import use_draft
from cms.utils.page_resolver import get_page_queryset
from cms.utils.placeholder import validate_placeholder_name, get_toolbar_plugin_struct, restore_sekizai_context
from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import mail_managers
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.encoding import smart_text
from django.utils.html import escape
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, get_language
import re
from sekizai.helpers import Watcher
from sekizai.templatetags.sekizai_tags import SekizaiParser, RenderBlock
register = template.Library()
def has_permission(page, request):
return page.has_change_permission(request)
register.filter(has_permission)
CLEAN_KEY_PATTERN = re.compile(r'[^a-zA-Z0-9_-]')
def _clean_key(key):
return CLEAN_KEY_PATTERN.sub('-', key)
def _get_cache_key(name, page_lookup, lang, site_id):
if isinstance(page_lookup, Page):
page_key = str(page_lookup.pk)
else:
page_key = str(page_lookup)
page_key = _clean_key(page_key)
return get_cms_setting('CACHE_PREFIX') + name + '__page_lookup:' + page_key + '_site:' + str(site_id) + '_lang:' + str(lang)
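# Illustrative sketch (not part of the original module): assuming the default
# CMS_CACHE_PREFIX of 'cms-', a call like
#   _get_cache_key('page_url', 'contact', 'en', 1)
# produces 'cms-page_url__page_lookup:contact_site:1_lang:en'; non-alphanumeric
# characters in the lookup are first normalised to '-' by _clean_key().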
def _get_page_by_untyped_arg(page_lookup, request, site_id):
"""
The `page_lookup` argument can be of any of the following types:
- Integer: interpreted as `pk` of the desired page
- String: interpreted as `reverse_id` of the desired page
- `dict`: a dictionary containing keyword arguments to find the desired page
(for instance: `{'pk': 1}`)
- `Page`: you can also pass a Page object directly, in which case there will be no database lookup.
- `None`: the current page will be used
"""
if page_lookup is None:
return request.current_page
if isinstance(page_lookup, Page):
if request.current_page and request.current_page.pk == page_lookup.pk:
return request.current_page
return page_lookup
if isinstance(page_lookup, string_types):
page_lookup = {'reverse_id': page_lookup}
elif isinstance(page_lookup, int_types):
page_lookup = {'pk': page_lookup}
elif not isinstance(page_lookup, dict):
raise TypeError('The page_lookup argument can be either a Dictionary, Integer, Page, or String.')
page_lookup.update({'site': site_id})
try:
if 'pk' in page_lookup:
page = Page.objects.all().get(**page_lookup)
if request and use_draft(request):
if page.publisher_is_draft:
return page
else:
return page.publisher_draft
else:
if page.publisher_is_draft:
return page.publisher_public
else:
return page
else:
return get_page_queryset(request).get(**page_lookup)
except Page.DoesNotExist:
site = Site.objects.get_current()
subject = _('Page not found on %(domain)s') % {'domain': site.domain}
body = _("A template tag couldn't find the page with lookup arguments `%(page_lookup)s\n`. "
"The URL of the request was: http://%(host)s%(path)s") \
% {'page_lookup': repr(page_lookup), 'host': site.domain, 'path': request.path}
if settings.DEBUG:
raise Page.DoesNotExist(body)
else:
if settings.SEND_BROKEN_LINK_EMAILS:
mail_managers(subject, body, fail_silently=True)
return None
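# Illustrative sketch (not part of the original module) of the lookup types
# accepted above; `request` is assumed to be an ordinary HttpRequest:
#   _get_page_by_untyped_arg(42, request, 1)            # by primary key
#   _get_page_by_untyped_arg('contact', request, 1)     # by reverse_id
#   _get_page_by_untyped_arg({'pk': 42}, request, 1)    # by arbitrary filter kwargs
#   _get_page_by_untyped_arg(None, request, 1)          # current page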
class PageUrl(AsTag):
name = 'page_url'
options = Options(
Argument('page_lookup'),
Argument('lang', required=False, default=None),
Argument('site', required=False, default=None),
'as',
Argument('varname', required=False, resolve=False),
)
def get_value_for_context(self, context, **kwargs):
        #
        # A design decision reached with several active members of the
        # django-cms community: using this tag with the 'as' variant should
        # never raise exceptions, regardless of the value of settings.DEBUG.
        #
        # We wish to maintain backwards compatibility where the non-as variant
        # of this tag raises DoesNotExist exceptions only when
        # settings.DEBUG=True.
        #
try:
return super(PageUrl, self).get_value_for_context(context, **kwargs)
except Page.DoesNotExist:
return ''
def get_value(self, context, page_lookup, lang, site):
from django.core.cache import cache
site_id = get_site_id(site)
request = context.get('request', False)
if not request:
return ''
if lang is None:
lang = get_language_from_request(request)
cache_key = _get_cache_key('page_url', page_lookup, lang, site_id) + \
'_type:absolute_url'
url = cache.get(cache_key)
if not url:
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if page:
url = page.get_absolute_url(language=lang)
cache.set(cache_key, url,
get_cms_setting('CACHE_DURATIONS')['content'])
if url:
return url
return ''
register.tag(PageUrl)
register.tag('page_id_url', PageUrl)
def _get_placeholder(current_page, page, context, name):
from django.core.cache import cache
placeholder_cache = getattr(current_page, '_tmp_placeholders_cache', {})
if page.pk in placeholder_cache:
placeholder = placeholder_cache[page.pk].get(name, None)
if placeholder:
return placeholder
placeholder_cache[page.pk] = {}
placeholders = page.rescan_placeholders().values()
fetch_placeholders = []
request = context['request']
if not get_cms_setting('PLACEHOLDER_CACHE') or (hasattr(request, 'toolbar') and request.toolbar.edit_mode):
fetch_placeholders = placeholders
else:
for placeholder in placeholders:
cache_key = placeholder.get_cache_key(get_language())
cached_value = cache.get(cache_key)
if not cached_value is None:
restore_sekizai_context(context, cached_value['sekizai'])
placeholder.content_cache = cached_value['content']
else:
fetch_placeholders.append(placeholder)
placeholder.cache_checked = True
if fetch_placeholders:
assign_plugins(context['request'], fetch_placeholders, page.get_template(), get_language())
for placeholder in placeholders:
placeholder_cache[page.pk][placeholder.slot] = placeholder
placeholder.page = page
current_page._tmp_placeholders_cache = placeholder_cache
placeholder = placeholder_cache[page.pk].get(name, None)
if page.application_urls and not placeholder:
raise PlaceholderNotFound(
'"%s" placeholder not found in an apphook application. Please use a static placeholder instead.' % name)
return placeholder
def get_placeholder_content(context, request, current_page, name, inherit, default):
from django.core.cache import cache
edit_mode = getattr(request, 'toolbar', None) and getattr(request.toolbar, 'edit_mode')
pages = [current_page]
# don't display inherited plugins in edit mode, so that the user doesn't
# mistakenly edit/delete them. This is a fix for issue #1303. See the discussion
# there for possible enhancements
if inherit and not edit_mode:
pages = chain([current_page], current_page.get_cached_ancestors(ascending=True))
for page in pages:
placeholder = _get_placeholder(current_page, page, context, name)
if placeholder is None:
continue
if not edit_mode and get_cms_setting('PLACEHOLDER_CACHE'):
if hasattr(placeholder, 'content_cache'):
return mark_safe(placeholder.content_cache)
if not hasattr(placeholder, 'cache_checked'):
cache_key = placeholder.get_cache_key(get_language())
cached_value = cache.get(cache_key)
if not cached_value is None:
restore_sekizai_context(context, cached_value['sekizai'])
return mark_safe(cached_value['content'])
if not get_plugins(request, placeholder, page.get_template()):
continue
content = render_placeholder(placeholder, context, name)
if content:
return content
    # if we reach this point, we have an empty or non-existent placeholder
# call _get_placeholder again to get the placeholder properly rendered
# in frontend editing
placeholder = _get_placeholder(current_page, current_page, context, name)
return render_placeholder(placeholder, context, name, default=default)
class PlaceholderParser(Parser):
def parse_blocks(self):
for bit in getattr(self.kwargs['extra_bits'], 'value', self.kwargs['extra_bits']):
if getattr(bit, 'value', bit.var.value) == 'or':
return super(PlaceholderParser, self).parse_blocks()
return
class PlaceholderOptions(Options):
def get_parser_class(self):
return PlaceholderParser
class Placeholder(Tag):
"""
This template node is used to output page content and
is also used in the admin to dynamically generate input fields.
eg: {% placeholder "placeholder_name" %}
{% placeholder "sidebar" inherit %}
{% placeholder "footer" inherit or %}
<a href="/about/">About us</a>
{% endplaceholder %}
Keyword arguments:
name -- the name of the placeholder
width -- additional width attribute (integer) which gets added to the plugin context
             (deprecated, use `{% with 320 as width %}{% placeholder "foo" %}{% endwith %}`)
inherit -- optional argument which if given will result in inheriting
the content of the placeholder with the same name on parent pages
or -- optional argument which if given will make the template tag a block
tag whose content is shown if the placeholder is empty
"""
name = 'placeholder'
options = PlaceholderOptions(
Argument('name', resolve=False),
MultiValueArgument('extra_bits', required=False, resolve=False),
blocks=[
('endplaceholder', 'nodelist'),
]
)
def render_tag(self, context, name, extra_bits, nodelist=None):
validate_placeholder_name(name)
width = None
inherit = False
for bit in extra_bits:
if bit == 'inherit':
inherit = True
elif bit.isdigit():
width = int(bit)
import warnings
warnings.warn(
"The width parameter for the placeholder tag is deprecated.",
DeprecationWarning
)
if not 'request' in context:
return ''
request = context['request']
if width:
context.update({'width': width})
page = request.current_page
if not page or page == 'dummy':
if nodelist:
return nodelist.render(context)
return ''
try:
content = get_placeholder_content(context, request, page, name, inherit, nodelist)
except PlaceholderNotFound:
if nodelist:
return nodelist.render(context)
raise
if not content:
if nodelist:
return nodelist.render(context)
return ''
return content
def get_name(self):
return self.kwargs['name'].var.value.strip('"').strip("'")
register.tag(Placeholder)
class RenderPlugin(InclusionTag):
template = 'cms/content.html'
name = 'render_plugin'
options = Options(
Argument('plugin')
)
def get_context(self, context, plugin):
# Prepend frontedit toolbar output if applicable
edit = False
if not plugin:
return {'content': ''}
request = context['request']
toolbar = getattr(request, 'toolbar', None)
page = request.current_page
if toolbar and toolbar.edit_mode and (not page or page.has_change_permission(request)):
edit = True
if edit:
from cms.middleware.toolbar import toolbar_plugin_processor
processors = (toolbar_plugin_processor,)
else:
processors = None
return {'content': plugin.render_plugin(context, processors=processors)}
register.tag(RenderPlugin)
class PluginChildClasses(InclusionTag):
"""
    Accepts a placeholder or a plugin and renders the child plugin classes allowed for it.
"""
template = "cms/toolbar/dragitem_menu.html"
name = "plugin_child_classes"
options = Options(
Argument('obj')
)
def get_context(self, context, obj):
# Prepend frontedit toolbar output if applicable
request = context['request']
page = request.current_page
child_plugin_classes = []
if isinstance(obj, CMSPlugin):
slot = context['slot']
plugin = obj
plugin_class = plugin.get_plugin_class()
if plugin_class.allow_children:
instance, plugin = plugin.get_plugin_instance()
childs = [plugin_pool.get_plugin(cls) for cls in plugin.get_child_classes(slot, page)]
# Builds the list of dictionaries containing module, name and value for the plugin dropdowns
child_plugin_classes = get_toolbar_plugin_struct(childs, slot, page, parent=plugin_class)
elif isinstance(obj, PlaceholderModel):
placeholder = obj
page = placeholder.page if placeholder else None
if not page:
page = getattr(request, 'current_page', None)
if placeholder:
slot = placeholder.slot
else:
slot = None
# Builds the list of dictionaries containing module, name and value for the plugin dropdowns
child_plugin_classes = get_toolbar_plugin_struct(plugin_pool.get_all_plugins(slot, page), slot, page)
return {'plugin_classes': child_plugin_classes}
register.tag(PluginChildClasses)
class PageAttribute(AsTag):
"""
This template node is used to output an attribute from a page such
as its title or slug.
Synopsis
{% page_attribute "field-name" %}
{% page_attribute "field-name" as varname %}
{% page_attribute "field-name" page_lookup %}
{% page_attribute "field-name" page_lookup as varname %}
Example
{# Output current page's page_title attribute: #}
{% page_attribute "page_title" %}
{# Output page_title attribute of the page with reverse_id "the_page": #}
{% page_attribute "page_title" "the_page" %}
{# Output slug attribute of the page with pk 10: #}
{% page_attribute "slug" 10 %}
{# Assign page_title attribute to a variable: #}
{% page_attribute "page_title" as title %}
Keyword arguments:
field-name -- the name of the field to output. Use one of:
- title
- menu_title
- page_title
- slug
- meta_description
- changed_date
- changed_by
    page_lookup -- lookup argument for Page; if omitted, the field-name of the current page is returned.
See _get_page_by_untyped_arg() for detailed information on the allowed types and their interpretation
for the page_lookup argument.
varname -- context variable name. Output will be added to template context as this variable.
This argument is required to follow the 'as' keyword.
"""
name = 'page_attribute'
options = Options(
Argument('name', resolve=False),
Argument('page_lookup', required=False, default=None),
'as',
Argument('varname', required=False, resolve=False)
)
valid_attributes = [
"title",
"slug",
"meta_description",
"page_title",
"menu_title",
"changed_date",
"changed_by",
]
def get_value(self, context, name, page_lookup):
if not 'request' in context:
return ''
name = name.lower()
request = context['request']
lang = get_language_from_request(request)
page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))
if page == "dummy":
return ''
if page and name in self.valid_attributes:
func = getattr(page, "get_%s" % name)
ret_val = func(language=lang, fallback=True)
if not isinstance(ret_val, datetime):
ret_val = escape(ret_val)
return ret_val
return ''
register.tag(PageAttribute)
class CleanAdminListFilter(InclusionTag):
template = 'admin/filter.html'
name = 'clean_admin_list_filter'
options = Options(
Argument('cl'),
Argument('spec'),
)
def get_context(self, context, cl, spec):
choices = sorted(list(spec.choices(cl)), key=lambda k: k['query_string'])
query_string = None
unique_choices = []
for choice in choices:
if choice['query_string'] != query_string:
unique_choices.append(choice)
query_string = choice['query_string']
return {'title': spec.title(), 'choices': unique_choices}
def _show_placeholder_for_page(context, placeholder_name, page_lookup, lang=None,
site=None, cache_result=True):
"""
Shows the content of a page with a placeholder name and given lookup
arguments in the given language.
This is useful if you want to have some more or less static content that is
shared among many pages, such as a footer.
See _get_page_by_untyped_arg() for detailed information on the allowed types
and their interpretation for the page_lookup argument.
"""
from django.core.cache import cache
validate_placeholder_name(placeholder_name)
request = context.get('request', False)
site_id = get_site_id(site)
if not request:
return {'content': ''}
if lang is None:
lang = get_language_from_request(request)
if cache_result:
base_key = _get_cache_key('_show_placeholder_for_page', page_lookup, lang, site_id)
cache_key = _clean_key('%s_placeholder:%s' % (base_key, placeholder_name))
cached_value = cache.get(cache_key)
if cached_value:
restore_sekizai_context(context, cached_value['sekizai'])
return {'content': mark_safe(cached_value['content'])}
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if not page:
return {'content': ''}
try:
placeholder = page.placeholders.get(slot=placeholder_name)
except PlaceholderModel.DoesNotExist:
if settings.DEBUG:
raise
return {'content': ''}
watcher = Watcher(context)
content = render_placeholder(placeholder, context, placeholder_name)
changes = watcher.get_changes()
if cache_result:
cache.set(cache_key, {'content': content, 'sekizai': changes}, get_cms_setting('CACHE_DURATIONS')['content'])
if content:
return {'content': mark_safe(content)}
return {'content': ''}
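# Illustrative template usage sketch (not part of the original module); the
# reverse_id 'footer_page' is hypothetical. The tag is registered below as
# show_placeholder / show_placeholder_by_id:
#   {% load cms_tags %}
#   {% show_placeholder "footer" "footer_page" %}
#   {% show_placeholder "footer" "footer_page" "en" 1 %}
# The rendered content is cached per page/language/site unless the uncached
# variant of the tag is used.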
class ShowPlaceholderById(InclusionTag):
template = 'cms/content.html'
name = 'show_placeholder_by_id'
options = Options(
Argument('placeholder_name'),
Argument('reverse_id'),
Argument('lang', required=False, default=None),
Argument('site', required=False, default=None),
)
def get_context(self, *args, **kwargs):
return _show_placeholder_for_page(**self.get_kwargs(*args, **kwargs))
def get_kwargs(self, context, placeholder_name, reverse_id, lang, site):
cache_result = True
if 'preview' in context['request'].GET:
cache_result = False
return {
'context': context,
'placeholder_name': placeholder_name,
'page_lookup': reverse_id,
'lang': lang,
'site': site,
'cache_result': cache_result
}
register.tag(ShowPlaceholderById)
register.tag('show_placeholder', ShowPlaceholderById)
class ShowUncachedPlaceholderById(ShowPlaceholderById):
name = 'show_uncached_placeholder_by_id'
def get_kwargs(self, *args, **kwargs):
kwargs = super(ShowUncachedPlaceholderById, self).get_kwargs(*args, **kwargs)
kwargs['cache_result'] = False
return kwargs
register.tag(ShowUncachedPlaceholderById)
register.tag('show_uncached_placeholder', ShowUncachedPlaceholderById)
class CMSToolbar(RenderBlock):
name = 'cms_toolbar'
options = Options(
Argument('name', required=False), # just here so sekizai thinks this is a RenderBlock
parser_class=SekizaiParser,
)
def render_tag(self, context, name, nodelist):
# render JS
request = context.get('request', None)
toolbar = getattr(request, 'toolbar', None)
if toolbar:
toolbar.populate()
context['cms_toolbar_login_error'] = request.GET.get('cms-toolbar-login-error', False) == '1'
context['cms_version'] = __version__
if toolbar and toolbar.show_toolbar:
language = toolbar.toolbar_language
with force_language(language):
# needed to populate the context with sekizai content
render_to_string('cms/toolbar/toolbar_javascript.html', context)
clipboard = mark_safe(render_to_string('cms/toolbar/clipboard.html', context))
else:
language = None
clipboard = ''
# render everything below the tag
rendered_contents = nodelist.render(context)
# sanity checks
if not request:
return rendered_contents
if not toolbar:
return rendered_contents
if not toolbar.show_toolbar:
return rendered_contents
# render the toolbar content
request.toolbar.post_template_populate()
with force_language(language):
context['clipboard'] = clipboard
content = render_to_string('cms/toolbar/toolbar.html', context)
# return the toolbar content and the content below
return '%s\n%s' % (content, rendered_contents)
register.tag(CMSToolbar)
class CMSEditableObject(InclusionTag):
"""
    Template tag that links content extracted from a generic Django model
    to the model admin changeform.
"""
template = 'cms/toolbar/content.html'
edit_template = 'cms/toolbar/plugin.html'
name = 'render_model'
options = Options(
Argument('instance'),
Argument('attribute'),
Argument('edit_fields', default=None, required=False),
Argument('language', default=None, required=False),
Argument('filters', default=None, required=False),
Argument('view_url', default=None, required=False),
Argument('view_method', default=None, required=False),
'as',
Argument('varname', required=False, resolve=False),
)
def __init__(self, parser, tokens):
self.parser = parser
super(CMSEditableObject, self).__init__(parser, tokens)
def _is_editable(self, request):
return (request and hasattr(request, 'toolbar') and
request.toolbar.edit_mode)
def get_template(self, context, **kwargs):
if self._is_editable(context.get('request', None)):
return self.edit_template
return self.template
def render_tag(self, context, **kwargs):
"""
Overridden from InclusionTag to push / pop context to avoid leaks
"""
context.push()
template = self.get_template(context, **kwargs)
data = self.get_context(context, **kwargs)
output = render_to_string(template, data)
context.pop()
if kwargs.get('varname'):
context[kwargs['varname']] = output
return ''
else:
return output
def _get_editable_context(self, context, instance, language, edit_fields,
view_method, view_url, querystring, editmode=True):
"""
        Populate the context with the attributes required to trigger the changeform
"""
request = context['request']
if hasattr(request, 'toolbar'):
lang = request.toolbar.toolbar_language
else:
lang = get_language()
with force_language(lang):
extra_context = {}
if edit_fields == 'changelist':
instance.get_plugin_name = u"%s %s list" % (smart_text(_('Edit')), smart_text(instance._meta.verbose_name))
extra_context['attribute_name'] = 'changelist'
elif editmode:
instance.get_plugin_name = u"%s %s" % (smart_text(_('Edit')), smart_text(instance._meta.verbose_name))
else:
instance.get_plugin_name = u"%s %s" % (smart_text(_('Add')), smart_text(instance._meta.verbose_name))
extra_context['attribute_name'] = 'add'
extra_context['instance'] = instance
extra_context['generic'] = instance._meta
            # view_method takes precedence: we retrieve the corresponding
            # attribute from the instance class.
            # If view_method refers to a method it is called with the
            # request; if it's an attribute, its value is used directly
if view_method:
method = getattr(instance, view_method)
if callable(method):
url_base = method(context['request'])
else:
url_base = method
else:
# The default view_url is the default admin changeform for the
# current instance
if not editmode:
view_url = 'admin:%s_%s_add' % (
instance._meta.app_label, instance._meta.module_name)
url_base = reverse(view_url)
elif not edit_fields:
view_url = 'admin:%s_%s_change' % (
instance._meta.app_label, instance._meta.module_name)
url_base = reverse(view_url, args=(instance.pk,))
else:
if not view_url:
view_url = 'admin:%s_%s_edit_field' % (
instance._meta.app_label, instance._meta.module_name)
if view_url.endswith('_changelist'):
url_base = reverse(view_url)
else:
url_base = reverse(view_url, args=(instance.pk, language))
querystring['edit_fields'] = ",".join(context['edit_fields'])
if editmode:
extra_context['edit_url'] = "%s?%s" % (url_base, urlencode(querystring))
else:
extra_context['edit_url'] = "%s" % url_base
extra_context['refresh_page'] = True
            # We may be outside the CMS (e.g. an application which is not attached via an Apphook);
            # in this case we can only go back to the home page
if getattr(context['request'], 'current_page', None):
extra_context['redirect_on_close'] = context['request'].current_page.get_absolute_url(language)
else:
extra_context['redirect_on_close'] = ''
return extra_context
def _get_content(self, context, instance, attribute, language, filters):
"""
Renders the requested attribute
"""
extra_context = copy(context)
if hasattr(instance, 'lazy_translation_getter'):
extra_context['content'] = instance.lazy_translation_getter(attribute, '')
else:
extra_context['content'] = getattr(instance, attribute, '')
        # This allows the requested item to be a method, a property or an
        # attribute
if callable(extra_context['content']):
if isinstance(instance, Page):
extra_context['content'] = extra_context['content'](language)
else:
extra_context['content'] = extra_context['content'](context['request'])
if filters:
expression = self.parser.compile_filter("content|%s" % (filters))
extra_context['content'] = expression.resolve(extra_context)
return extra_context
def _get_data_context(self, context, instance, attribute, edit_fields,
language, filters, view_url, view_method):
"""
        Renders the requested attribute and attaches a changeform trigger to it
Uses `_get_empty_context`
"""
if not attribute:
return context
attribute = attribute.strip()
# ugly-ish
if isinstance(instance, Page):
if attribute == 'title':
attribute = 'get_title'
if not edit_fields:
edit_fields = 'title'
elif attribute == 'page_title':
attribute = 'get_page_title'
if not edit_fields:
edit_fields = 'page_title'
elif attribute == 'menu_title':
attribute = 'get_menu_title'
if not edit_fields:
edit_fields = 'menu_title'
elif attribute == 'titles':
attribute = 'get_title'
if not edit_fields:
edit_fields = 'title,page_title,menu_title'
view_url = 'admin:cms_page_edit_title_fields'
extra_context = copy(context)
extra_context['attribute_name'] = attribute
extra_context = self._get_empty_context(extra_context, instance,
edit_fields, language, view_url,
view_method)
extra_context.update(self._get_content(extra_context, instance, attribute,
language, filters))
# content is for non-edit template content.html
# rendered_content is for edit template plugin.html
# in this templatetag both hold the same content
extra_context['content'] = mark_safe(extra_context['content'])
extra_context['rendered_content'] = extra_context['content']
return extra_context
def _get_empty_context(self, context, instance, edit_fields, language,
view_url, view_method, editmode=True):
"""
        Injects into a copy of the context the data required to trigger the edit.
        `content` and `rendered_content` are emptied.
"""
if not language:
language = get_language_from_request(context['request'])
        # This allows the requested item to be a method, a property or an
        # attribute
if not instance and editmode:
return context
extra_context = copy(context)
# ugly-ish
if instance and isinstance(instance, Page):
if edit_fields == 'titles':
edit_fields = 'title,page_title,menu_title'
view_url = 'admin:cms_page_edit_title_fields'
if edit_fields == 'changelist':
view_url = 'admin:cms_page_changelist'
querystring = {'language': language}
if edit_fields:
extra_context['edit_fields'] = edit_fields.strip().split(",")
        # If the toolbar is not enabled, the following part is just skipped: it
        # would cause a performance hit for no reason
extra_context.update(context)
if self._is_editable(context.get('request', None)):
extra_context.update(self._get_editable_context(
extra_context, instance, language, edit_fields, view_method,
view_url, querystring, editmode))
# content is for non-edit template content.html
# rendered_content is for edit template plugin.html
# in this templatetag both hold the same content
extra_context['content'] = ''
extra_context['rendered_content'] = ''
return extra_context
def get_context(self, context, instance, attribute, edit_fields,
language, filters, view_url, view_method, varname):
"""
Uses _get_data_context to render the requested attributes
"""
extra_context = self._get_data_context(context, instance, attribute,
edit_fields, language, filters,
view_url, view_method)
extra_context['render_model'] = True
return extra_context
register.tag(CMSEditableObject)
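# Illustrative template usage sketch (not part of the original module); the
# context variable `entry` and its fields are hypothetical:
#   {% load cms_tags %}
#   {% render_model entry "title" %}
#   {% render_model entry "title" "title,abstract" "en" "truncatewords:10" %}
# In edit mode the rendered value is wrapped with the markup that opens the
# model's admin changeform; outside edit mode only the value is output.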
class CMSEditableObjectIcon(CMSEditableObject):
"""
    Template tag that links content extracted from a generic Django model
    to the model admin changeform.
The output of this templatetag is just an icon to trigger the changeform.
"""
name = 'render_model_icon'
options = Options(
Argument('instance'),
Argument('edit_fields', default=None, required=False),
Argument('language', default=None, required=False),
Argument('view_url', default=None, required=False),
Argument('view_method', default=None, required=False),
'as',
Argument('varname', required=False, resolve=False),
)
def get_context(self, context, instance, edit_fields, language,
view_url, view_method, varname):
"""
Uses _get_empty_context and adds the `render_model_icon` variable.
"""
extra_context = self._get_empty_context(context, instance, edit_fields,
language, view_url, view_method)
extra_context['render_model_icon'] = True
return extra_context
register.tag(CMSEditableObjectIcon)
class CMSEditableObjectAdd(CMSEditableObject):
"""
    Template tag that links content extracted from a generic Django model
    to the model admin changeform.
The output of this templatetag is just an icon to trigger the changeform.
"""
name = 'render_model_add'
options = Options(
Argument('instance'),
Argument('language', default=None, required=False),
Argument('view_url', default=None, required=False),
Argument('view_method', default=None, required=False),
'as',
Argument('varname', required=False, resolve=False),
)
def get_context(self, context, instance, language,
view_url, view_method, varname):
"""
        Uses _get_empty_context and adds the `render_model_add` variable.
"""
extra_context = self._get_empty_context(context, instance, None,
language, view_url, view_method,
editmode=False)
extra_context['render_model_add'] = True
return extra_context
register.tag(CMSEditableObjectAdd)
class CMSEditableObjectBlock(CMSEditableObject):
"""
    Template tag that links content extracted from a generic Django model
    to the model admin changeform.
The rendered content is to be specified in the enclosed block.
"""
name = 'render_model_block'
options = Options(
Argument('instance'),
Argument('edit_fields', default=None, required=False),
Argument('language', default=None, required=False),
Argument('view_url', default=None, required=False),
Argument('view_method', default=None, required=False),
'as',
Argument('varname', required=False, resolve=False),
blocks=[('endrender_model_block', 'nodelist')],
)
def render_tag(self, context, **kwargs):
"""
        Renders the block and then injects the resulting HTML into the template
context
"""
context.push()
template = self.get_template(context, **kwargs)
data = self.get_context(context, **kwargs)
data['content'] = mark_safe(kwargs['nodelist'].render(data))
data['rendered_content'] = data['content']
output = render_to_string(template, data)
context.pop()
if kwargs.get('varname'):
context[kwargs['varname']] = output
return ''
else:
return output
def get_context(self, context, instance, edit_fields, language,
view_url, view_method, varname, nodelist):
"""
        Uses _get_empty_context and adds the `instance` object to the local
        context. "Context" here means the context of the nodelist
        in the block.
"""
extra_context = self._get_empty_context(context, instance, edit_fields,
language, view_url, view_method)
extra_context['instance'] = instance
extra_context['render_model_block'] = True
return extra_context
register.tag(CMSEditableObjectBlock)
class StaticPlaceholderNode(Tag):
name = 'static_placeholder'
options = PlaceholderOptions(
Argument('code', required=True),
MultiValueArgument('extra_bits', required=False, resolve=False),
blocks=[
('endstatic_placeholder', 'nodelist'),
]
)
def render_tag(self, context, code, extra_bits, nodelist=None):
# TODO: language override (the reason this is not implemented, is that language selection is buried way
# down somewhere in some method called in render_plugins. There it gets extracted from the request
# and a language in request.GET always overrides everything.)
if not code:
# an empty string was passed in or the variable is not available in the context
if nodelist:
return nodelist.render(context)
return ''
request = context.get('request', False)
if not request:
if nodelist:
return nodelist.render(context)
return ''
if isinstance(code, StaticPlaceholder):
static_placeholder = code
else:
if 'site' in extra_bits:
site = Site.objects.get_current()
static_placeholder, __ = StaticPlaceholder.objects.get_or_create(code=code, site_id=site.pk, defaults={'name': code,
'creation_method': StaticPlaceholder.CREATION_BY_TEMPLATE})
else:
static_placeholder, __ = StaticPlaceholder.objects.get_or_create(code=code, site_id__isnull=True, defaults={'name': code,
'creation_method': StaticPlaceholder.CREATION_BY_TEMPLATE})
if not hasattr(request, 'static_placeholders'):
request.static_placeholders = []
request.static_placeholders.append(static_placeholder)
if hasattr(request, 'toolbar') and request.toolbar.edit_mode:
placeholder = static_placeholder.draft
else:
placeholder = static_placeholder.public
placeholder.is_static = True
content = render_placeholder(placeholder, context, name_fallback=code, default=nodelist)
return content
register.tag(StaticPlaceholderNode)
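# Illustrative template usage sketch (not part of the original module):
#   {% load cms_tags %}
#   {% static_placeholder "footer" %}
#   {% static_placeholder "footer" site or %}<p>Fallback content</p>{% endstatic_placeholder %}
# Passing the extra 'site' bit binds the static placeholder to the current
# site instead of sharing it across all sites; the 'or' block is rendered when
# the placeholder is empty.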
class RenderPlaceholder(Tag):
name = 'render_placeholder'
options = Options(
Argument('placeholder'),
Argument('width', default=None, required=False),
'language',
Argument('language', default=None, required=False),
)
def render_tag(self, context, placeholder, width, language=None):
request = context.get('request', None)
if not request:
return ''
if not placeholder:
return ''
        if not hasattr(request, 'placeholders'):
request.placeholders = []
request.placeholders.append(placeholder)
return safe(placeholder.render(context, width, lang=language))
register.tag(RenderPlaceholder)
| mit | 3,690,432,044,043,633,000 | 38.511494 | 137 | 0.610521 | false |
k0001/mediasancion | mediasancion/core/models.py | 1 | 4365 | # coding: utf-8
#
# MediaSanción, a web application for accessing the public data of
# legislative activity in Argentina.
# Copyright (C) 2010,2011,2012 Renzo Carbonara <renzo @carbonara .com .ar>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import AutoSlugField, UUIDField
from mediasancion.utils.models import StandardAbstractModel
class Distrito(StandardAbstractModel):
uuid = UUIDField(version=4, unique=True, db_index=True)
nombre = models.CharField(max_length=128)
slug = AutoSlugField(populate_from='nombre')
def __unicode__(self):
return self.nombre
@models.permalink
def get_absolute_url(self):
return 'core:distritos:detail', (self.slug,)
@property
@models.permalink
def api0_url(self):
return 'api0:core:distritos:detail', (self.uuid,)
class Meta:
verbose_name = _(u"distrito")
verbose_name_plural = _(u"distritos")
class Partido(StandardAbstractModel):
uuid = UUIDField(version=4, unique=True, db_index=True)
nombre = models.CharField(max_length=255)
slug = AutoSlugField(populate_from='nombre', overwrite=True)
def __unicode__(self):
return self.nombre
@property
@models.permalink
def api0_url(self):
return 'api0:core:partidos:detail', (self.uuid,)
class Meta:
verbose_name = _(u"partido político")
verbose_name_plural = _(u"partidos políticos")
class Bloque(StandardAbstractModel):
uuid = UUIDField(version=4, unique=True, db_index=True)
nombre = models.CharField(max_length=255)
slug = AutoSlugField(populate_from='nombre', overwrite=True)
def __unicode__(self):
return self.nombre
@models.permalink
def get_absolute_url(self):
return 'core:bloques:detail', (self.slug,)
@property
@models.permalink
def api0_url(self):
return 'api0:core:bloques:detail', (self.uuid,)
class Meta:
verbose_name = _(u"bloque político")
verbose_name_plural = _(u"bloques políticos")
class Persona(StandardAbstractModel):
TIPO_DOCUMENTO_CHOICES = (
('D', _(u"D.N.I.")), )
uuid = UUIDField(version=4, unique=True, db_index=True)
slug = AutoSlugField(populate_from=('apellido', 'nombre'), overwrite=True)
nombre = models.CharField(max_length=128)
apellido = models.CharField(max_length=128)
documento_tipo = models.CharField(max_length=1, choices=TIPO_DOCUMENTO_CHOICES, null=True, blank=True)
documento_numero = models.CharField(max_length=63, null=True, blank=True)
email = models.EmailField(blank=True, null=True)
telefono = models.CharField(max_length=32, blank=True, null=True)
website = models.URLField(blank=True, null=True)
foto = models.ImageField(null=True, blank=True, upload_to='persona-foto/')
@property
def identity(self):
if self.documento_tipo and self.documento_numero:
return u'%s %s' % (self.get_documento_tipo_display, self.documento_numero)
@property
def full_name(self):
return u'%s, %s' % (self.apellido, self.nombre)
def __unicode__(self):
return self.full_name
@property
@models.permalink
def api0_url(self):
return 'api0:core:personas:detail', (self.uuid,)
@models.permalink
def get_absolute_url(self):
return 'core:personas:detail', (self.slug,)
def save(self, *args, **kwargs):
        # Forbid saving a partial identity: if either documento field is set, both must be
if self.documento_tipo or self.documento_numero:
assert self.documento_tipo and self.documento_numero
super(Persona, self).save(*args, **kwargs)
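    # Illustrative sketch (not part of the original source; the names are
    # hypothetical): saving a Persona with only one documento field set trips
    # the assertion above,
    #   p = Persona(nombre='Ana', apellido='Gomez', documento_tipo='D')
    #   p.save()        # AssertionError: documento_numero is missing
    # while setting both fields (or neither) saves normally.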
| agpl-3.0 | -1,767,903,761,686,009,600 | 32.523077 | 106 | 0.687701 | false |
michael-donat/ansible | lib/ansible/vars/__init__.py | 1 | 18914 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from collections import defaultdict
from collections import MutableMapping
from jinja2.exceptions import UndefinedError
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.inventory.host import Host
from ansible.parsing import DataLoader
from ansible.plugins.cache import FactCache
from ansible.template import Templar
from ansible.utils.debug import debug
from ansible.utils.vars import combine_vars
from ansible.vars.hostvars import HostVars
from ansible.vars.unsafe_proxy import UnsafeProxy
CACHED_VARS = dict()
def preprocess_vars(a):
'''
    Ensures that vars contained in the parameter passed in are
    returned as a list of dictionaries, so that, for instance,
    vars loaded from a file conform to an expected state.
'''
if a is None:
return None
elif not isinstance(a, list):
data = [ a ]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
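# Illustrative sketch (not part of the original module):
#   preprocess_vars({'foo': 1})               -> [{'foo': 1}]
#   preprocess_vars([{'foo': 1}, {'bar': 2}]) -> [{'foo': 1}, {'bar': 2}]
#   preprocess_vars(None)                     -> None
# Anything that is not a mapping (or a list of mappings) raises AnsibleError.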
class VariableManager:
def __init__(self):
self._fact_cache = FactCache()
self._nonpersistent_fact_cache = defaultdict(dict)
self._vars_cache = defaultdict(dict)
self._extra_vars = defaultdict(dict)
self._host_vars_files = defaultdict(dict)
self._group_vars_files = defaultdict(dict)
self._inventory = None
self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
def _get_cache_entry(self, play=None, host=None, task=None):
play_id = "NONE"
if play:
play_id = play._uuid
host_id = "NONE"
if host:
host_id = host.get_name()
task_id = "NONE"
if task:
task_id = task._uuid
return "PLAY:%s;HOST:%s;TASK:%s" % (play_id, host_id, task_id)
@property
def extra_vars(self):
        ''' ensures a clean copy of the extra_vars is made '''
return self._extra_vars.copy()
@extra_vars.setter
def extra_vars(self, value):
        ''' ensures a clean copy of the extra_vars is used to set the value '''
assert isinstance(value, MutableMapping)
self._extra_vars = value.copy()
def set_inventory(self, inventory):
self._inventory = inventory
def _preprocess_vars(self, a):
'''
        Ensures that vars contained in the parameter passed in are
        returned as a list of dictionaries, so that, for instance,
        vars loaded from a file conform to an expected state.
'''
if a is None:
return None
elif not isinstance(a, list):
data = [ a ]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
'''
Returns the variables, with optional "context" given via the parameters
for the play, host, and task (which could possibly result in different
sets of variables being returned due to the additional context).
The order of precedence is:
- play->roles->get_default_vars (if there is a play context)
- group_vars_files[host] (if there is a host context)
- host_vars_files[host] (if there is a host context)
- host->get_vars (if there is a host context)
- fact_cache[host] (if there is a host context)
- play vars (if there is a play context)
- play vars_files (if there's no host context, ignore
file names that cannot be templated)
- task->get_vars (if there is a task context)
- vars_cache[host] (if there is a host context)
- extra vars
'''
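        # Illustrative sketch (not part of the original source): each source in
        # the precedence list above is folded in with combine_vars(), so later
        # sources win on key collisions, e.g.
        #   combine_vars({'a': 1, 'b': 1}, {'b': 2})  ->  {'a': 1, 'b': 2}
        # (with the default 'replace' hash behaviour).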
debug("in VariableManager get_vars()")
cache_entry = self._get_cache_entry(play=play, host=host, task=task)
if cache_entry in CACHED_VARS and use_cache:
debug("vars are cached, returning them now")
return CACHED_VARS[cache_entry]
all_vars = defaultdict(dict)
if play:
# first we compile any vars specified in defaults/main.yml
# for all roles within the specified play
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_default_vars())
# if we have a task in this context, and that task has a role, make
# sure it sees its defaults above any other roles, as we previously
            # (v1) made sure each task had a copy of its role's default vars
if task and task._role is not None:
all_vars = combine_vars(all_vars, task._role.get_default_vars())
if host:
# next, if a host is specified, we load any vars from group_vars
# files and then any vars from host_vars files which may apply to
# this host or the groups it belongs to
# we merge in vars from groups specified in the inventory (INI or script)
all_vars = combine_vars(all_vars, host.get_group_vars())
# then we merge in the special 'all' group_vars first, if they exist
if 'all' in self._group_vars_files:
data = preprocess_vars(self._group_vars_files['all'])
for item in data:
all_vars = combine_vars(all_vars, item)
for group in host.get_groups():
if group.name in self._group_vars_files and group.name != 'all':
for data in self._group_vars_files[group.name]:
data = preprocess_vars(data)
for item in data:
all_vars = combine_vars(all_vars, item)
# then we merge in vars from the host specified in the inventory (INI or script)
all_vars = combine_vars(all_vars, host.get_vars())
# then we merge in the host_vars/<hostname> file, if it exists
host_name = host.get_name()
if host_name in self._host_vars_files:
for data in self._host_vars_files[host_name]:
data = preprocess_vars(data)
for item in data:
all_vars = combine_vars(all_vars, item)
# finally, the facts caches for this host, if it exists
try:
host_facts = self._fact_cache.get(host.name, dict())
for k in host_facts.keys():
if host_facts[k] is not None and not isinstance(host_facts[k], UnsafeProxy):
host_facts[k] = UnsafeProxy(host_facts[k])
all_vars = combine_vars(all_vars, host_facts)
except KeyError:
pass
if play:
all_vars = combine_vars(all_vars, play.get_vars())
for vars_file_item in play.get_vars_files():
try:
# create a set of temporary vars here, which incorporate the
# extra vars so we can properly template the vars_files entries
temp_vars = combine_vars(all_vars, self._extra_vars)
templar = Templar(loader=loader, variables=temp_vars)
# we assume each item in the list is itself a list, as we
# support "conditional includes" for vars_files, which mimics
# the with_first_found mechanism.
vars_file_list = templar.template(vars_file_item)
if not isinstance(vars_file_list, list):
vars_file_list = [ vars_file_list ]
# now we iterate through the (potential) files, and break out
# as soon as we read one from the list. If none are found, we
# raise an error, which is silently ignored at this point.
for vars_file in vars_file_list:
data = preprocess_vars(loader.load_from_file(vars_file))
if data is not None:
for item in data:
all_vars = combine_vars(all_vars, item)
break
else:
raise AnsibleError("vars file %s was not found" % vars_file_item)
except (UndefinedError, AnsibleUndefinedVariable):
continue
if not C.DEFAULT_PRIVATE_ROLE_VARS:
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_vars())
if task:
if task._role:
all_vars = combine_vars(all_vars, task._role.get_vars())
all_vars = combine_vars(all_vars, task.get_vars())
if host:
all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))
all_vars = combine_vars(all_vars, self._extra_vars)
# FIXME: make sure all special vars are here
# Finally, we create special vars
all_vars['playbook_dir'] = loader.get_basedir()
if host:
all_vars['group_names'] = [group.name for group in host.get_groups()]
if self._inventory is not None:
all_vars['groups'] = dict()
for (group_name, group) in self._inventory.groups.iteritems():
all_vars['groups'][group_name] = [h.name for h in group.get_hosts()]
if include_hostvars:
hostvars = HostVars(vars_manager=self, play=play, inventory=self._inventory, loader=loader)
all_vars['hostvars'] = hostvars
if task:
if task._role:
all_vars['role_path'] = task._role._role_path
# if we have a task and we're delegating to another host, figure out the
# variables for that host now so we don't have to rely on hostvars later
if task.delegate_to is not None and include_delegate_to:
# we unfortunately need to template the delegate_to field here,
# as we're fetching vars before post_validate has been called on
# the task that has been passed in
templar = Templar(loader=loader, variables=all_vars)
delegated_host_name = templar.template(task.delegate_to)
# now try to find the delegated-to host in inventory, or failing that,
# create a new host on the fly so we can fetch variables for it
delegated_host = None
if self._inventory is not None:
delegated_host = self._inventory.get_host(delegated_host_name)
# try looking it up based on the address field, and finally
# fall back to creating a host on the fly to use for the var lookup
if delegated_host is None:
for h in self._inventory.get_hosts(ignore_limits_and_restrictions=True):
# check if the address matches, or if both the delegated_to host
# and the current host are in the list of localhost aliases
if h.address == delegated_host_name or h.name in C.LOCALHOST and delegated_host_name in C.LOCALHOST:
delegated_host = h
break
else:
delegated_host = Host(name=delegated_host_name)
else:
delegated_host = Host(name=delegated_host_name)
# now we go fetch the vars for the delegated-to host and save them in our
# master dictionary of variables to be used later in the TaskExecutor/PlayContext
all_vars['ansible_delegated_vars'] = self.get_vars(loader=loader, play=play, host=delegated_host, task=task, include_delegate_to=False, include_hostvars=False)
if self._inventory is not None:
all_vars['inventory_dir'] = self._inventory.basedir()
if play:
# add the list of hosts in the play, as adjusted for limit/filters
# DEPRECATED: play_hosts should be deprecated in favor of ansible_play_hosts,
# however this would take work in the templating engine, so for now
# we'll add both so we can give users something transitional to use
host_list = [x.name for x in self._inventory.get_hosts()]
all_vars['play_hosts'] = host_list
all_vars['ansible_play_hosts'] = host_list
        # the 'omit' value allows params to be left out if the variable they are based on is undefined
all_vars['omit'] = self._omit_token
all_vars['ansible_version'] = CLI.version_info(gitinfo=False)
if 'hostvars' in all_vars and host:
all_vars['vars'] = all_vars['hostvars'][host.get_name()]
#CACHED_VARS[cache_entry] = all_vars
debug("done with get_vars()")
return all_vars
def _get_inventory_basename(self, path):
'''
Returns the basename minus the extension of the given path, so the
bare filename can be matched against host/group names later
'''
(name, ext) = os.path.splitext(os.path.basename(path))
if ext not in ('.yml', '.yaml'):
return os.path.basename(path)
else:
return name
def _load_inventory_file(self, path, loader):
'''
helper function, which loads the file and gets the
basename of the file without the extension
'''
if loader.is_directory(path):
data = dict()
try:
names = loader.list_directory(path)
except os.error as err:
raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))
# evaluate files in a stable order rather than whatever
# order the filesystem lists them.
names.sort()
# do not parse hidden files or dirs, e.g. .svn/
paths = [os.path.join(path, name) for name in names if not name.startswith('.')]
for p in paths:
_found, results = self._load_inventory_file(path=p, loader=loader)
if results is not None:
data = combine_vars(data, results)
else:
file_name, ext = os.path.splitext(path)
data = None
if not ext or ext not in C.YAML_FILENAME_EXTENSIONS:
for test_ext in C.YAML_FILENAME_EXTENSIONS:
new_path = path + test_ext
if loader.path_exists(new_path):
data = loader.load_from_file(new_path)
break
else:
if loader.path_exists(path):
data = loader.load_from_file(path)
name = self._get_inventory_basename(path)
return (name, data)
def add_host_vars_file(self, path, loader):
'''
Loads and caches a host_vars file in the _host_vars_files dict,
where the key to that dictionary is the basename of the file, minus
the extension, for matching against a given inventory host name
'''
(name, data) = self._load_inventory_file(path, loader)
if data:
if name not in self._host_vars_files:
self._host_vars_files[name] = []
self._host_vars_files[name].append(data)
return data
else:
return dict()
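    # Illustrative sketch (not part of the original source; the path, `manager`
    # and `loader` names are hypothetical):
    #   manager.add_host_vars_file('/etc/ansible/host_vars/web01.yml', loader)
    # caches the file's data under the key 'web01', which is later matched
    # against inventory host names in get_vars(); directories and the supported
    # YAML extensions are resolved by _load_inventory_file() above.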
def add_group_vars_file(self, path, loader):
'''
        Loads and caches a group_vars file in the _group_vars_files dict,
        where the key to that dictionary is the basename of the file, minus
        the extension, for matching against a given inventory group name
'''
(name, data) = self._load_inventory_file(path, loader)
if data:
if name not in self._group_vars_files:
self._group_vars_files[name] = []
self._group_vars_files[name].append(data)
return data
else:
return dict()
def set_host_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
assert isinstance(facts, dict)
if host.name not in self._fact_cache:
self._fact_cache[host.name] = facts
else:
try:
self._fact_cache[host.name].update(facts)
except KeyError:
self._fact_cache[host.name] = facts
def set_nonpersistent_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
assert isinstance(facts, dict)
if host.name not in self._nonpersistent_fact_cache:
self._nonpersistent_fact_cache[host.name] = facts
else:
try:
self._nonpersistent_fact_cache[host.name].update(facts)
except KeyError:
self._nonpersistent_fact_cache[host.name] = facts
def set_host_variable(self, host, varname, value):
'''
Sets a value in the vars_cache for a host.
'''
host_name = host.get_name()
if host_name not in self._vars_cache:
self._vars_cache[host_name] = dict()
self._vars_cache[host_name][varname] = value
| gpl-3.0 | -8,885,744,624,233,779,000 | 40.206972 | 175 | 0.580945 | false |
liiight/notifiers | tests/test_json_schema.py | 1 | 3283 | import hypothesis.strategies as st
import pytest
from hypothesis import given
from jsonschema import validate, ValidationError
from notifiers.utils.schema.formats import format_checker
from notifiers.utils.schema.helpers import one_or_more, list_to_commas
class TestFormats:
@pytest.mark.parametrize(
"formatter, value",
[
("iso8601", "2018-07-15T07:39:59+00:00"),
("iso8601", "2018-07-15T07:39:59Z"),
("iso8601", "20180715T073959Z"),
("rfc2822", "Thu, 25 Dec 1975 14:15:16 -0500"),
("ascii", "foo"),
("port", "44444"),
("port", 44_444),
("timestamp", 1531644024),
("timestamp", "1531644024"),
("e164", "+14155552671"),
("e164", "+442071838750"),
("e164", "+551155256325"),
],
)
def test_format_positive(self, formatter, value):
validate(value, {"format": formatter}, format_checker=format_checker)
def test_valid_file_format(self, tmpdir):
file_1 = tmpdir.mkdir("foo").join("file_1")
file_1.write("bar")
validate(str(file_1), {"format": "valid_file"}, format_checker=format_checker)
@pytest.mark.parametrize(
"formatter, value",
[
("iso8601", "2018-14-15T07:39:59+00:00"),
("iso8601", "2018-07-15T07:39:59Z~"),
("iso8601", "20180715T0739545639Z"),
("rfc2822", "Thu 25 Dec14:15:16 -0500"),
("ascii", "פו"),
("port", "70000"),
("port", 70_000),
("timestamp", "15565-5631644024"),
("timestamp", "155655631644024"),
("e164", "-14155552671"),
("e164", "+44207183875063673465"),
("e164", "+551155256325zdfgsd"),
],
)
def test_format_negative(self, formatter, value):
with pytest.raises(ValidationError):
validate(value, {"format": formatter}, format_checker=format_checker)
class TestSchemaUtils:
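    """Covers the schema helper utilities such as one_or_more and list_to_commas."""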
@pytest.mark.parametrize(
"input_schema, unique_items, min, max, data",
[
({"type": "string"}, True, 1, 1, "foo"),
({"type": "string"}, True, 1, 2, ["foo", "bar"]),
({"type": "integer"}, True, 1, 2, 1),
({"type": "integer"}, True, 1, 2, [1, 2]),
],
)
def test_one_or_more_positive(self, input_schema, unique_items, min, max, data):
expected_schema = one_or_more(input_schema, unique_items, min, max)
validate(data, expected_schema)
@pytest.mark.parametrize(
"input_schema, unique_items, min, max, data",
[
({"type": "string"}, True, 1, 1, 1),
({"type": "string"}, True, 1, 1, ["foo", "bar"]),
({"type": "integer"}, False, 3, None, [1, 1]),
({"type": "integer"}, True, 1, 1, [1, 2]),
],
)
def test_one_or_more_negative(self, input_schema, unique_items, min, max, data):
expected_schema = one_or_more(input_schema, unique_items, min, max)
with pytest.raises(ValidationError):
validate(data, expected_schema)
@given(st.lists(st.text()))
def test_list_to_commas(self, input_data):
assert list_to_commas(input_data) == ",".join(input_data)
| mit | -875,849,426,209,985,800 | 35.865169 | 86 | 0.541908 | false |
praekelt/txtalert | txtalert/apps/therapyedge/tests/importer.py | 1 | 28383 | from django.test import TestCase
from django.utils import timezone
from django.contrib.auth.models import User
from txtalert.apps.therapyedge.importer import Importer, SEX_MAP
from txtalert.apps.therapyedge.xmlrpc import client
from txtalert.core.models import Patient, MSISDN, Visit, Clinic
from txtalert.apps.therapyedge.tests.utils import (PatientUpdate, ComingVisit, MissedVisit,
DoneVisit, DeletedVisit, create_instance)
from datetime import datetime, timedelta, date
import random
import logging
import iso8601
class ImporterTestCase(TestCase):
"""Testing the TherapyEdge import loop"""
fixtures = ['patients', 'clinics']
def setUp(self):
self.importer = Importer()
# make sure we're actually testing some data
self.assertTrue(Patient.objects.count() > 0)
self.clinic = Clinic.objects.all()[0]
self.user = User.objects.get(username="kumbu")
def tearDown(self):
pass
def test_update_local_patients(self):
"""Test the mapping of the incoming Patient objects to local copies"""
# mock received data from TherapyEdge XML-RPC
data = [(
'', # dr_site_id
'', # dr_site_name
'%s' % idx, # age, as string
random.choice(SEX_MAP.keys()), # sex
'2712345678%s' % idx, # celphone
random.choice(('true','false')), # dr_status
'02-7012%s' % idx # te_id
) for idx in range(0, 10)]
updated_patients = map(PatientUpdate._make, data)
local_patients = list(self.importer.update_local_patients(self.user, updated_patients))
self.assertEquals(len(local_patients), 10)
for updated_patient in updated_patients:
local_patient = Patient.objects.get(te_id=updated_patient.te_id)
# check for msisdn
msisdn = MSISDN.objects.get(msisdn=updated_patient.celphone)
self.assertTrue(msisdn in local_patient.msisdns.all())
# check for age
self.assertEquals(local_patient.age, int(updated_patient.age))
# check for sex
self.assertEquals(local_patient.sex, SEX_MAP[updated_patient.sex])
# check for te_id
self.assertEquals(local_patient.te_id, updated_patient.te_id)
def test_update_local_coming_visits(self):
data = [(
'', # dr_site_name
'', # dr_site_id
'false', # dr_status
'2009-11-1%s 00:00:00' % idx, # scheduled_visit_date
'02-00089421%s' % idx, # key_id
patient.te_id, # te_id
) for idx, patient in enumerate(Patient.objects.all())]
coming_visits = map(ComingVisit._make, data)
local_visits = set(self.importer.update_local_coming_visits(
self.user,
self.clinic,
coming_visits
))
self.assertEquals(len(local_visits), Patient.objects.count())
for coming_visit in coming_visits:
# don't need to test this as Django does this for us
local_visit = Visit.objects.get(te_visit_id=coming_visit.key_id)
self.assertEquals(
iso8601.parse_date(coming_visit.scheduled_visit_date).date(),
local_visit.date
)
def test_update_local_missed_visits(self):
data = [(
'', # dr_site_name
'', # dr_site_id
'2009-11-1%s 00:00:00' % idx, # missed_date
'', # dr_status
'02-00089421%s' % idx, # key_id
patient.te_id, # te_id
) for idx, patient in enumerate(Patient.objects.all())]
missed_visits = map(MissedVisit._make, data)
local_visits = set(self.importer.update_local_missed_visits(
self.user,
self.clinic,
missed_visits
))
self.assertEquals(len(local_visits), Patient.objects.count())
for missed_visit in missed_visits:
local_visit = Visit.objects.get(te_visit_id=missed_visit.key_id)
self.assertEquals(
iso8601.parse_date(missed_visit.missed_date).date(),
local_visit.date
)
def test_missed_visits(self):
# helper methods
def make_visit(named_tuple_klass, dictionary):
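            # build a namedtuple prefilled with its own field names as values,
            # then override the fields supplied in dictionary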
return named_tuple_klass._make(named_tuple_klass._fields) \
._replace(**dictionary)
# mock patient
patient = Patient.objects.all()[0]
# create a visit that's already been scheduled earlier, mock a
# previous import
visit = patient.visit_set.create(
te_visit_id='02-002173383',
date=date.today(),
status='s',
clinic=self.clinic
)
# create a missed visit
missed_visit = make_visit(MissedVisit, {
'dr_site_name': '',
'dr_site_id': '',
'dr_status': 'false',
'missed_date': '%s 00:00:00' % date.today(),
'key_id': '02-002173383',
'te_id': patient.te_id
})
# import the data
list(self.importer.update_local_missed_visits(self.user, self.clinic, [missed_visit]))
# get the visit and check its status
visit = patient.visit_set.get(te_visit_id='02-002173383')
self.assertEquals(visit.status, 'm')
def test_update_local_reschedules_from_missed(self):
"""missed visits in the future are reschedules"""
future_date = date.today() + timedelta(days=7) # one week ahead
# first plan the scheduleds
data = [(
'', # dr_site_name
'', # dr_site_id
'false', # dr_status
# scheduled_visit_date, force to start one day ahead of today
# to make sure they're always future dates
'%s 00:00:00' % (date.today() + timedelta(days=(idx+1))),
'02-00089421%s' % idx, # key_id
patient.te_id, # te_id
) for idx, patient in enumerate(Patient.objects.all())]
coming_visits = map(ComingVisit._make, data)
local_visits = set(self.importer.update_local_coming_visits(
self.user,
self.clinic,
coming_visits
))
self.assertEquals(len(local_visits), Patient.objects.count())
for coming_visit in coming_visits:
# don't need to test this as Django does this for us
local_visit = Visit.objects.get(te_visit_id=coming_visit.key_id)
self.assertEquals('s', local_visit.status)
# now plan the future misseds, should be reschedules
data = [(
'', # dr_site_name
'', # dr_site_id
'%s 00:00:00' % future_date, # missed_date
'', # dr_status
'02-00089421%s' % idx, # key_id
patient.te_id, # te_id
) for idx, patient in enumerate(Patient.objects.all())]
rescheduled_visits = map(MissedVisit._make, data)
local_visits = set(self.importer.update_local_missed_visits(
self.user,
self.clinic,
rescheduled_visits
))
self.assertEquals(len(local_visits), Patient.objects.count())
for rescheduled_visit in rescheduled_visits:
local_visit = Visit.objects.get(te_visit_id=rescheduled_visit.key_id)
self.assertEquals(local_visit.status, 'r')
def test_update_local_reschedules_from_coming(self):
"""future visits that get a new date in the future are reschedules"""
data = [(
'', # dr_site_name
'', # dr_site_id
'false', # dr_status
# scheduled_visit_date, force to start one day ahead of today
# to make sure they're always future dates
'%s 00:00:00' % (date.today() + timedelta(days=(idx+1))),
'02-00089421%s' % idx, # key_id
patient.te_id, # te_id
) for idx, patient in enumerate(Patient.objects.all())]
coming_visits = map(ComingVisit._make, data)
local_visits = set(self.importer.update_local_coming_visits(
self.user,
self.clinic,
coming_visits
))
self.assertEquals(len(local_visits), Patient.objects.count())
for coming_visit in coming_visits:
# don't need to test this as Django does this for us
local_visit = Visit.objects.get(te_visit_id=coming_visit.key_id)
self.assertEquals('s', local_visit.status)
        # send in a batch of future coming visits to mimic reschedules
future_date = date.today() + timedelta(days=7) # one week ahead
data = [(
'', # dr_site_name
'', # dr_site_id
'false', # dr_status
'%s 00:00:00' % future_date, # scheduled_visit_date
'02-00089421%s' % idx, # key_id
patient.te_id, # te_id
) for idx, patient in enumerate(Patient.objects.all())]
coming_visits = map(ComingVisit._make, data)
set(self.importer.update_local_coming_visits(self.user, self.clinic, coming_visits))
for coming_visit in coming_visits:
local_visit = Visit.objects.get(te_visit_id=coming_visit.key_id)
self.assertEquals('r', local_visit.status)
def test_update_local_done_visits(self):
data = [(
'2009-11-1%s 00:00:00' % idx, # done_date
'', # dr_site_id
'', # dr_status
'', # dr_site_name
'2009-10-1%s 00:00:00' % idx, # scheduled_date, mocked to be a month earlier
'02-00089421%s' % idx, # key_id
patient.te_id, # te_id
) for idx, patient in enumerate(Patient.objects.all())]
done_visits = map(DoneVisit._make, data)
local_visits = set(self.importer.update_local_done_visits(
self.user,
self.clinic,
done_visits
))
self.assertEquals(len(local_visits), Patient.objects.count())
for done_visit in done_visits:
local_visit = Visit.objects.get(te_visit_id=done_visit.key_id)
# the visit should have the same done date
self.assertEquals(
iso8601.parse_date(done_visit.done_date).date(),
local_visit.date
)
# the visit should have the status of a, 'attended'
self.assertEquals(
local_visit.status,
'a'
)
def test_update_local_deleted_visits(self):
# first create the visit events to be deleted
data = [(
'', # dr_site_name
'', # dr_site_id
'false', # dr_status
'2009-11-1%s 00:00:00' % idx, # scheduled_visit_date
'02-00089421%s' % idx, # key_id
patient.te_id, # te_id
) for idx, patient in enumerate(Patient.objects.all())]
coming_visits = map(ComingVisit._make, data)
local_visits = set(self.importer.update_local_coming_visits(
self.user,
self.clinic,
coming_visits
))
self.assertEquals(len(coming_visits), len(local_visits))
data = [(
'02-00089421%s' % idx, # key_id
'false', # dr_status
'', # dr_site_id
patient.te_id, # te_id
'', # dr_site_name
) for idx, patient in enumerate(Patient.objects.all())]
deleted_visits = map(DeletedVisit._make, data)
        # use a list comprehension because set() dedupes the list and for some
        # reason it considers the deleted django objects to be dupes
        # and returns a list of one
local_visits = [v for v in self.importer.update_local_deleted_visits(
self.user,
deleted_visits
)]
self.assertEquals(len(local_visits), Patient.objects.count())
for deleted_visit in deleted_visits:
self.assertEquals(
Visit.objects.filter(te_visit_id=deleted_visit.key_id).count(),
0
)
def test_for_history_duplication(self):
"""
        Test for history duplication happening after numerous imports over time.
        The data for this test has been gleaned from the txtalert log being
        used in production. For some reason imports that should be 'missed'
        are set as 'rescheduled' and even though nothing changes in the
        appointment, a historical visit is still saved.
"""
# create the patient for which we'll get the visits
patient = Patient.objects.create(te_id='02-82088', age=29, sex='m',
owner=self.user)
# importer
importer = Importer()
# [importer] 2010-03-18 08:00:37,705 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
# [importer] 2010-03-18 08:01:39,354 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
# [importer] 2010-03-19 08:00:36,876 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
# [importer] 2010-03-19 08:01:36,747 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
# [importer] 2010-03-20 08:00:29,600 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
# [importer] 2010-03-20 08:01:30,926 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
# [importer] 2010-03-21 08:00:28,052 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
# [importer] 2010-03-21 08:01:33,909 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
# [importer] 2010-03-22 08:00:27,711 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
# [importer] 2010-03-22 08:01:33,549 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
# [importer] 2010-03-23 08:00:26,453 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
# [importer] 2010-03-23 08:01:36,731 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
# [importer] 2010-03-25 09:00:41,774 DEBUG Processing coming Visit {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'}
coming_visit = create_instance(ComingVisit, {'dr_site_name': '', 'dr_site_id': '', 'dr_status': 'false', 'scheduled_visit_date': '2010-03-24 00:00:00', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_coming_visit = importer.update_local_coming_visit(self.user, self.clinic, coming_visit)
# [importer] 2010-03-25 09:00:41,850 DEBUG Updating existing Visit: 37361 / ({'date': datetime.date(2010, 3, 24), 'updated_at': datetime.datetime(2010, 3, 23, 8, 1, 36)} vs {'status': u'r', 'comment': u'', 'visit_type': u'', 'deleted': 0, 'created_at': datetime.datetime(2010, 3, 18, 8, 0, 37), 'updated_at': datetime.datetime(2010, 3, 23, 8, 1, 36), 'te_visit_id': u'02-091967084', 'date': datetime.date(2010, 3, 24), 'id': 37361L})
# [importer] 2010-03-25 09:01:40,902 DEBUG Processing missed Visit: {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'}
missed_visit = create_instance(MissedVisit, {'dr_site_name': '', 'dr_site_id': '', 'missed_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_missed_visit = importer.update_local_missed_visit(self.user, self.clinic, missed_visit)
visit = patient.visit_set.latest()
self.assertEquals(visit.status, 'm')
self.assertEquals(visit.history.count(), 1)
done_visit = create_instance(DoneVisit, {'dr_site_name': '', 'dr_site_id': '', 'done_date': '2010-03-24 00:00:00', 'scheduled_date': '2010-03-24 00:00:00', 'dr_status': 'false', 'key_id': '02-091967084', 'te_id': '02-82088'})
local_done_visit = importer.update_local_done_visit(self.user, self.clinic, done_visit)
visit = patient.visit_set.latest()
self.assertEquals(visit.status, 'a')
self.assertEquals(visit.history.count(), 2)
class PatchedClient(client.Client):
def __init__(self, **kwargs):
self.patches = kwargs
def mocked_patients_data(self, request, *args, **kwargs):
"""Mocking the response we get from TherapyEdge for a patients_update
call"""
logging.debug('Mocked rpc_call called with: %s, %s, %s' % (request, args, kwargs))
return self.patches[request]
class ImporterXmlRpcClientTestCase(TestCase):
fixtures = ['patients', 'clinics']
def setUp(self):
self.importer = Importer()
self.user = User.objects.get(username="kumbu")
# patching the client to automatically return our specified result
# sets without doing an XML-RPC call
patched_client = PatchedClient(
patients_update=[{
'dr_site_name': '',
'dr_site_id': '',
'age': '2%s' % i,
'sex': random.choice(['Male', 'Female']),
'celphone': '2712345678%s' % i,
'dr_status': '',
'te_id': patient.te_id,
} for i, patient in enumerate(Patient.objects.all())],
comingvisits=[{
'dr_site_name': '',
'dr_site_id': '',
'dr_status': '',
'scheduled_visit_date': str(timezone.now() + timedelta(days=2)),
'key_id': '02-1234%s' % i,
'te_id': patient.te_id,
} for i, patient in enumerate(Patient.objects.all())],
missedvisits=[{
'dr_site_name': '',
'dr_site_id': '',
'missed_date': str(timezone.now() - timedelta(days=2)),
'dr_status': '',
'key_id': '03-1234%s' % i,
'te_id': patient.te_id
} for i, patient in enumerate(Patient.objects.all())],
donevisits=[{
'done_date': str(timezone.now() - timedelta(days=2)),
'dr_site_id': '',
'dr_status': '',
'dr_site_name': '',
'scheduled_date': str(timezone.now() - timedelta(days=2)),
'key_id': '04-1234%s' % i,
'te_id': patient.te_id
} for i, patient in enumerate(Patient.objects.all())],
deletedvisits=[{
'key_id': '02-1234%s' % i,
'dr_status': '',
'dr_site_id': '',
'te_id': patient.te_id,
'dr_site_name': ''
} for i, patient in enumerate(Patient.objects.all())]
)
# monkey patching
self.importer.client.server.patients_data = patched_client.mocked_patients_data
self.clinic = Clinic.objects.all()[0] # make sure we have a clinic
self.assertTrue(Patient.objects.count()) # make sure our fixtures aren't empty
def tearDown(self):
pass
def test_import_updated_patients(self):
"""The xmlrpc client is largely some boilterplate code and some little
helpers that transform the returned Dict into class instances. We're
testing that functionality here. Since all the stuff uses the same boiler
plate code we're only testing it for one method call.
"""
updated_patients = self.importer.import_updated_patients(
user=self.user,
clinic=self.clinic,
since=(timezone.now() - timedelta(days=1)),
until=timezone.now()
)
updated_patients = list(updated_patients)
self.assertTrue(len(updated_patients), Patient.objects.count())
self.assertTrue(isinstance(updated_patients[0], Patient))
def test_import_coming_visits(self):
coming_visits = self.importer.import_coming_visits(
user=self.user,
clinic=self.clinic,
since=(timezone.now() - timedelta(days=1)),
until=timezone.now(),
visit_type=3 # Medical Visit
)
coming_visits = list(coming_visits)
self.assertEquals(len(coming_visits), Patient.objects.count())
self.assertTrue(isinstance(coming_visits[0], Visit))
def test_missed_visits(self):
missed_visits = self.importer.import_missed_visits(
user=self.user,
clinic=self.clinic,
since=(timezone.now() - timedelta(days=1)),
visit_type=3 # Medical Visit
)
missed_visits = list(missed_visits)
self.assertEquals(len(missed_visits), Patient.objects.count())
self.assertTrue(isinstance(missed_visits[0], Visit))
def test_done_visits(self):
done_visits = self.importer.import_done_visits(
user=self.user,
clinic=self.clinic,
since=(timezone.now() - timedelta(days=1)),
until=timezone.now(),
visit_type=3 # Medical Visit
)
done_visits = list(done_visits)
self.assertEquals(len(done_visits), Patient.objects.count())
self.assertTrue(isinstance(done_visits[0], Visit))
def test_deleted_visits(self):
# first have some coming visits
coming_visits = list(self.importer.import_coming_visits(
user=self.user,
clinic=self.clinic,
since=(timezone.now() - timedelta(days=1)),
until=timezone.now(),
visit_type=3 # Medical Visit
))
# then mark them as deleted, they're matched because they
# have the same key_id
deleted_visits = list(self.importer.import_deleted_visits(
user=self.user,
clinic=self.clinic,
since=(timezone.now() - timedelta(days=1)),
until=timezone.now(),
visit_type=3 # Medical Visit
))
self.assertEquals(len(deleted_visits), Patient.objects.count())
self.assertTrue(isinstance(deleted_visits[0], Visit))
| gpl-3.0 | 967,314,180,620,399,400 | 53.373563 | 441 | 0.558257 | false |
aequitas/home-assistant | homeassistant/components/temper/sensor.py | 7 | 3307 | """Support for getting temperature from TEMPer devices."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME, TEMP_FAHRENHEIT
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_SCALE = 'scale'
CONF_OFFSET = 'offset'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): vol.Coerce(str),
vol.Optional(CONF_SCALE, default=1): vol.Coerce(float),
vol.Optional(CONF_OFFSET, default=0): vol.Coerce(float)
})
TEMPER_SENSORS = []
def get_temper_devices():
"""Scan the Temper devices from temperusb."""
from temperusb.temper import TemperHandler
return TemperHandler().get_devices()
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Temper sensors."""
temp_unit = hass.config.units.temperature_unit
name = config.get(CONF_NAME)
scaling = {
'scale': config.get(CONF_SCALE),
'offset': config.get(CONF_OFFSET)
}
temper_devices = get_temper_devices()
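    # give each additional device an index suffix so the sensors get distinct names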
for idx, dev in enumerate(temper_devices):
if idx != 0:
name = name + '_' + str(idx)
TEMPER_SENSORS.append(TemperSensor(dev, temp_unit, name, scaling))
add_entities(TEMPER_SENSORS)
def reset_devices():
"""
Re-scan for underlying Temper sensors and assign them to our devices.
This assumes the same sensor devices are present in the same order.
"""
temper_devices = get_temper_devices()
for sensor, device in zip(TEMPER_SENSORS, temper_devices):
sensor.set_temper_device(device)
class TemperSensor(Entity):
"""Representation of a Temper temperature sensor."""
def __init__(self, temper_device, temp_unit, name, scaling):
"""Initialize the sensor."""
self.temp_unit = temp_unit
self.scale = scaling['scale']
self.offset = scaling['offset']
self.current_value = None
self._name = name
self.set_temper_device(temper_device)
@property
def name(self):
"""Return the name of the temperature sensor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self.current_value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self.temp_unit
def set_temper_device(self, temper_device):
"""Assign the underlying device for this sensor."""
self.temper_device = temper_device
# set calibration data
self.temper_device.set_calibration_data(
scale=self.scale,
offset=self.offset
)
def update(self):
"""Retrieve latest state."""
try:
format_str = ('fahrenheit' if self.temp_unit == TEMP_FAHRENHEIT
else 'celsius')
sensor_value = self.temper_device.get_temperature(format_str)
self.current_value = round(sensor_value, 1)
except IOError:
_LOGGER.error("Failed to get temperature. The device address may"
"have changed. Attempting to reset device")
reset_devices()
| apache-2.0 | 8,785,866,354,727,695,000 | 30.798077 | 79 | 0.643484 | false |
miniconfig/home-assistant | tests/components/switch/test_init.py | 23 | 3411 | """The tests for the Switch component."""
# pylint: disable=protected-access
import unittest
from homeassistant.setup import setup_component
from homeassistant import loader
from homeassistant.components import switch
from homeassistant.const import STATE_ON, STATE_OFF, CONF_PLATFORM
from tests.common import get_test_home_assistant
class TestSwitch(unittest.TestCase):
"""Test the switch module."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
platform = loader.get_component('switch.test')
platform.init()
# Switch 1 is ON, switch 2 is OFF
self.switch_1, self.switch_2, self.switch_3 = \
platform.DEVICES
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_methods(self):
"""Test is_on, turn_on, turn_off methods."""
self.assertTrue(setup_component(
self.hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: 'test'}}
))
self.assertTrue(switch.is_on(self.hass))
self.assertEqual(
STATE_ON,
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state)
self.assertTrue(switch.is_on(self.hass, self.switch_1.entity_id))
self.assertFalse(switch.is_on(self.hass, self.switch_2.entity_id))
self.assertFalse(switch.is_on(self.hass, self.switch_3.entity_id))
switch.turn_off(self.hass, self.switch_1.entity_id)
switch.turn_on(self.hass, self.switch_2.entity_id)
self.hass.block_till_done()
self.assertTrue(switch.is_on(self.hass))
self.assertFalse(switch.is_on(self.hass, self.switch_1.entity_id))
self.assertTrue(switch.is_on(self.hass, self.switch_2.entity_id))
# Turn all off
switch.turn_off(self.hass)
self.hass.block_till_done()
self.assertFalse(switch.is_on(self.hass))
self.assertEqual(
STATE_OFF,
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state)
self.assertFalse(switch.is_on(self.hass, self.switch_1.entity_id))
self.assertFalse(switch.is_on(self.hass, self.switch_2.entity_id))
self.assertFalse(switch.is_on(self.hass, self.switch_3.entity_id))
# Turn all on
switch.turn_on(self.hass)
self.hass.block_till_done()
self.assertTrue(switch.is_on(self.hass))
self.assertEqual(
STATE_ON,
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state)
self.assertTrue(switch.is_on(self.hass, self.switch_1.entity_id))
self.assertTrue(switch.is_on(self.hass, self.switch_2.entity_id))
self.assertTrue(switch.is_on(self.hass, self.switch_3.entity_id))
def test_setup_two_platforms(self):
"""Test with bad configuration."""
# Test if switch component returns 0 switches
test_platform = loader.get_component('switch.test')
test_platform.init(True)
loader.set_component('switch.test2', test_platform)
test_platform.init(False)
self.assertTrue(setup_component(
self.hass, switch.DOMAIN, {
switch.DOMAIN: {CONF_PLATFORM: 'test'},
'{} 2'.format(switch.DOMAIN): {CONF_PLATFORM: 'test2'},
}
))
| mit | 8,759,282,363,653,833,000 | 35.677419 | 78 | 0.638229 | false |
davidrobles/mlnd-capstone-code | experiments/c4_ql_tab_simple_selfplay.py | 1 | 1783 | '''
The Q-learning algorithm is used to estimate the state-action values for a
simple Connect 4 position by playing games against itself (self-play).
'''
from capstone.game.games import Connect4
from capstone.game.players import RandPlayer
from capstone.game.utils import c42pdf
from capstone.rl import Environment, GameMDP
from capstone.rl.learners import QLearningSelfPlay
from capstone.rl.policies import RandomPolicy
from capstone.rl.utils import EpisodicWLDPlotter, QValuesPlotter
from capstone.rl.value_functions import TabularQ
seed = 23
board = [['X', 'O', 'O', ' ', 'O', ' ', ' '],
['X', 'O', 'X', ' ', 'X', ' ', ' '],
['O', 'X', 'O', 'X', 'O', 'X', 'O'],
['O', 'X', 'O', 'X', 'O', 'X', 'O'],
['X', 'O', 'X', 'O', 'X', 'O', 'X'],
['X', 'O', 'X', 'O', 'X', 'O', 'X']]
game = Connect4(board)
mdp = GameMDP(game)
env = Environment(mdp)
qlearning = QLearningSelfPlay(
env=env,
qfunction=TabularQ(random_state=seed),
policy=RandomPolicy(env.actions, random_state=seed),
learning_rate=0.1,
discount_factor=1.0,
n_episodes=4000,
)
qlearning.train(
callbacks=[
QValuesPlotter(
state=game,
actions=game.legal_moves(),
filepath='figures/c4_ql_tab_simple_selfplay_progress.pdf'
)
]
)
####################
# Generate figures #
####################
c42pdf('figures/c4_ql_tab_simple_selfplay_cur.pdf', game.board)
for move in game.legal_moves():
print('*' * 80)
value = qlearning.qfunction[(game, move)]
print('Move: {}'.format(move))
print('Value: %f' % value)
new_game = game.copy().make_move(move)
print(new_game)
filename = 'figures/c4_ql_tab_simple_selfplay_move_{}.pdf'.format(move)
c42pdf(filename, new_game.board)
| mit | -2,542,293,844,128,585,000 | 30.280702 | 75 | 0.607964 | false |
byterom/android_external_chromium_org | tools/checklicenses/checklicenses.py | 25 | 16386 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import json
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'Anti-Grain Geometry',
'Apache (v2.0)',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) GPL (v2)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'APSL (v2)',
'APSL (v2) BSD (4 clause)',
'BSD',
'BSD (2 clause)',
'BSD (2 clause) ISC',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (3 clause) GPL (v2)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2 or later)',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (4 clause)',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
'FreeType (BSD like)',
'FreeType (BSD like) with patent clause',
'GPL (v2) LGPL (v2.1 or later)',
'GPL (v2 or later) with Bison parser exception',
'GPL (v2 or later) with libtool exception',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'Independent JPEG Group License',
'ISC',
'LGPL (unversioned/unknown version)',
'LGPL (v2)',
'LGPL (v2 or later)',
'LGPL (v2.1)',
'LGPL (v2.1 or later)',
'LGPL (v3 or later)',
'MIT/X11 (BSD like)',
'MIT/X11 (BSD like) LGPL (v2.1 or later)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1)',
'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (v2)',
'MPL (v1.1) GPL (v2) LGPL (v2 or later)',
'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (unversioned/unknown version)',
'MPL (v1.1) LGPL (v2 or later)',
'MPL (v1.1) LGPL (v2.1 or later)',
'MPL (v2.0)',
'Ms-PL',
'Public domain',
'Public domain BSD',
'Public domain BSD (3 clause)',
'Public domain BSD-like',
'Public domain LGPL (v2.1 or later)',
'libpng',
'zlib/libpng',
'SGI Free Software License B',
'SunSoft (BSD like)',
'University of Illinois/NCSA Open Source License (BSD like)',
('University of Illinois/NCSA Open Source License (BSD like) '
'MIT/X11 (BSD like)'),
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
# This contains files copied from elsewhere from the tree. Since the copied
# directories might have suppressions below (like simplejson), whitelist the
# whole directory. This is also not shipped code.
'chrome/common/extensions/docs/server2/third_party': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSL (v1.0) GPL',
'BSL (v1.0) GPL (v3.1)',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3.1)',
'GPL (v3 or later)',
],
'third_party/WebKit': [
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
# http://crbug.com/222828
# http://bugs.python.org/issue17514
'third_party/chromite/third_party/argparse.py': [
'UNKNOWN',
],
# http://crbug.com/326117
# https://bitbucket.org/chrisatlee/poster/issue/21
'third_party/chromite/third_party/poster': [
'UNKNOWN',
],
# http://crbug.com/333508
'third_party/clang_format/script': [
'UNKNOWN',
],
# http://crbug.com/333508
'buildtools/clang_format/script': [
'UNKNOWN',
],
# https://mail.python.org/pipermail/cython-devel/2014-July/004062.html
'third_party/cython': [
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/fontconfig': [
# https://bugs.freedesktop.org/show_bug.cgi?id=73401
'UNKNOWN',
],
'third_party/freetype2': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libc++/trunk/include/support/solaris': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'third_party/libc++/trunk/src/support/solaris/xlocale.c': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'third_party/libc++/trunk/test': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
# Many liblouis files are mirrored but not used in the NaCl module.
# They are not excluded from the mirror because of lack of infrastructure
# support. Getting license headers added to the files where missing is
# tracked in https://github.com/liblouis/liblouis/issues/22.
'third_party/liblouis/src': [
'GPL (v3 or later)',
'UNKNOWN',
],
'third_party/libpng': [ # http://crbug.com/98318
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/src': [
'GPL (v2)',
'GPL (v3 or later)',
'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
'third_party/openmax_dl/dl' : [
'Khronos Group',
],
'third_party/openssl': [ # http://crbug.com/98451
'UNKNOWN',
],
'third_party/boringssl': [
# There are some files in BoringSSL which came from OpenSSL and have no
# license in them. We don't wish to add the license header ourselves
# thus we don't expect to pass license checks.
'UNKNOWN',
],
'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2
'UNKNOWN',
],
'third_party/molokocacao': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/npapi/npspy': [
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/ply/__init__.py': [
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
# http://crbug.com/222831
# https://bitbucket.org/eliben/pyelftools/issue/12
'third_party/pyelftools': [
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/speech-dispatcher/libspeechd.h': [
'GPL (v2 or later)',
],
'third_party/sqlite': [
'UNKNOWN',
],
# http://crbug.com/334668
# MIT license.
'tools/swarming_client/third_party/httplib2': [
'UNKNOWN',
],
# http://crbug.com/334668
# Apache v2.0.
'tools/swarming_client/third_party/oauth2client': [
'UNKNOWN',
],
# https://github.com/kennethreitz/requests/issues/1610
'tools/swarming_client/third_party/requests': [
'UNKNOWN',
],
'third_party/swig/Lib/linkruntime.c': [ # http://crbug.com/98585
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
# https://github.com/html5lib/html5lib-python/issues/125
# https://github.com/KhronosGroup/WebGL/issues/435
'third_party/webgl/src': [
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
'tools/telemetry/third_party/pyserial': [
# https://sourceforge.net/p/pyserial/feature-requests/35/
'UNKNOWN',
],
'v8/test/cctest': [ # http://crbug.com/98597
'UNKNOWN',
],
'v8/src/third_party/kernel/tools/perf/util/jitdump.h': [ # http://crbug.com/391716
'UNKNOWN',
],
}
def check_licenses(options, args):
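  """Runs licensecheck.pl over the tree and reports non-whitelisted licenses."""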
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = options.base_directory
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", options.base_directory
print "Checking:", start_dir
print
licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
'third_party',
'devscripts',
'licensecheck.pl'))
licensecheck = subprocess.Popen([licensecheck_path,
'-l', '100',
'-r', start_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = licensecheck.communicate()
if options.verbose:
print '----------- licensecheck stdout -----------'
print stdout
print '--------- end licensecheck stdout ---------'
if licensecheck.returncode != 0 or stderr:
print '----------- licensecheck stderr -----------'
print stderr
print '--------- end licensecheck stderr ---------'
print "\nFAILED\n"
return 1
used_suppressions = set()
errors = []
for line in stdout.splitlines():
filename, license = line.split(':', 1)
filename = os.path.relpath(filename.strip(), options.base_directory)
# All files in the build output directory are generated one way or another.
# There's no need to check them.
if filename.startswith('out/'):
continue
# For now we're just interested in the license.
license = license.replace('*No copyright*', '').strip()
# Skip generated files.
if 'GENERATED FILE' in license:
continue
if license in WHITELISTED_LICENSES:
continue
if not options.ignore_suppressions:
matched_prefixes = [
prefix for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES
if filename.startswith(prefix) and
license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]]
if matched_prefixes:
used_suppressions.update(set(matched_prefixes))
continue
errors.append({'filename': filename, 'license': license})
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
for error in errors:
print "'%s' has non-whitelisted license '%s'" % (
error['filename'], error['license'])
print "\nFAILED\n"
print "Please read",
print "http://www.chromium.org/developers/adding-3rd-party-libraries"
print "for more info how to handle the failure."
print
print "Please respect OWNERS of checklicenses.py. Changes violating"
print "this requirement may be reverted."
# Do not print unused suppressions so that above message is clearly
# visible and gets proper attention. Too much unrelated output
# would be distracting and make the important points easier to miss.
return 1
print "\nSUCCESS\n"
if not len(args):
unused_suppressions = set(
PATH_SPECIFIC_WHITELISTED_LICENSES.iterkeys()).difference(
used_suppressions)
if unused_suppressions:
print "\nNOTE: unused suppressions detected:\n"
print '\n'.join(unused_suppressions)
return 0
def main():
default_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
option_parser = optparse.OptionParser()
option_parser.add_option('--root', default=default_root,
dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Print debug logging')
option_parser.add_option('--ignore-suppressions',
action='store_true',
default=False,
help='Ignore path-specific license whitelist.')
option_parser.add_option('--json', help='Path to JSON output file')
options, args = option_parser.parse_args()
return check_licenses(options, args)
if '__main__' == __name__:
sys.exit(main())
| bsd-3-clause | -379,906,135,795,255,300 | 29.010989 | 97 | 0.573233 | false |
vishnu-kumar/PeformanceFramework | tests/unit/cli/commands/test_task.py | 6 | 35281 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime as date
import os.path
import mock
from rally.cli.commands import task
from rally import consts
from rally import exceptions
from tests.unit import fakes
from tests.unit import test
class TaskCommandsTestCase(test.TestCase):
def setUp(self):
super(TaskCommandsTestCase, self).setUp()
self.task = task.TaskCommands()
@mock.patch("rally.cli.commands.task.open", create=True)
def test__load_task(self, mock_open):
input_task = "{'ab': {{test}}}"
input_args = "{'test': 2}"
# NOTE(boris-42): Such order of files is because we are reading
# file with args before file with template.
mock_open.side_effect = [
mock.mock_open(read_data="{'test': 1}").return_value,
mock.mock_open(read_data=input_task).return_value
]
task_conf = self.task._load_task(
"in_task", task_args_file="in_args_path")
self.assertEqual({"ab": 1}, task_conf)
mock_open.side_effect = [
mock.mock_open(read_data=input_task).return_value
]
task_conf = self.task._load_task(
"in_task", task_args=input_args)
self.assertEqual(task_conf, {"ab": 2})
mock_open.side_effect = [
mock.mock_open(read_data="{'test': 1}").return_value,
mock.mock_open(read_data=input_task).return_value
]
task_conf = self.task._load_task(
"in_task", task_args=input_args, task_args_file="any_file")
self.assertEqual(task_conf, {"ab": 2})
@mock.patch("rally.cli.commands.task.open", create=True)
def test__load_task_wrong_task_args_file(self, mock_open):
mock_open.side_effect = [
mock.mock_open(read_data="{'test': {}").return_value
]
self.assertRaises(task.FailedToLoadTask,
self.task._load_task,
"in_task", task_args_file="in_args_path")
@mock.patch("rally.cli.commands.task.open", create=True)
def test__load_task_wrong_task_args_file_exception(self, mock_open):
mock_open.side_effect = IOError
self.assertRaises(IOError, self.task._load_task,
"in_task", task_args_file="in_args_path")
def test__load_task_wrong_input_task_args(self):
self.assertRaises(task.FailedToLoadTask,
self.task._load_task, "in_task",
"{'test': {}")
self.assertRaises(task.FailedToLoadTask,
self.task._load_task, "in_task", "[]")
@mock.patch("rally.cli.commands.task.open", create=True)
def test__load_task_task_render_raise_exc(self, mock_open):
mock_open.side_effect = [
mock.mock_open(read_data="{'test': {{t}}}").return_value
]
self.assertRaises(task.FailedToLoadTask,
self.task._load_task, "in_task")
@mock.patch("rally.cli.commands.task.open", create=True)
def test__load_task_task_not_in_yaml(self, mock_open):
mock_open.side_effect = [
mock.mock_open(read_data="{'test': {}").return_value
]
self.assertRaises(task.FailedToLoadTask,
self.task._load_task, "in_task")
def test_load_task_including_other_template(self):
other_template_path = os.path.join(
os.path.dirname(__file__),
"..", "..", "..", "..", "samples/tasks/scenarios/nova/boot.json")
input_task = "{%% include \"%s\" %%}" % os.path.basename(
other_template_path)
expect = self.task._load_task(other_template_path)
with mock.patch("rally.cli.commands.task.open",
create=True) as mock_open:
mock_open.side_effect = [
mock.mock_open(read_data=input_task).return_value
]
input_task_file = os.path.join(
os.path.dirname(other_template_path), "input_task.json")
actual = self.task._load_task(input_task_file)
self.assertEqual(expect, actual)
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.api.Task.validate",
return_value=fakes.FakeTask())
@mock.patch("rally.cli.commands.task.TaskCommands._load_task",
return_value={"uuid": "some_uuid"})
def test__load_and_validate_task(self, mock__load_task,
mock_task_validate, mock_os_path_exists):
deployment = "some_deployment_uuid"
self.task._load_and_validate_task("some_task", "task_args",
"task_args_file", deployment)
mock__load_task.assert_called_once_with("some_task", "task_args",
"task_args_file")
mock_task_validate.assert_called_once_with(
deployment, mock__load_task.return_value, None)
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.os.path.isdir", return_value=True)
@mock.patch("rally.cli.commands.task.TaskCommands._load_task")
@mock.patch("rally.api.Task.validate")
def test__load_and_validate_directory(self, mock_task_validate,
mock__load_task, mock_os_path_isdir,
mock_os_path_exists):
deployment = "some_deployment_uuid"
self.assertRaises(IOError, self.task._load_and_validate_task,
"some_task", "task_args",
"task_args_file", deployment)
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.os.path.isdir", return_value=False)
@mock.patch("rally.cli.commands.task.api.Task.create",
return_value=fakes.FakeTask(uuid="some_new_uuid", tag="tag"))
@mock.patch("rally.cli.commands.task.TaskCommands.use")
@mock.patch("rally.cli.commands.task.TaskCommands.detailed")
@mock.patch("rally.cli.commands.task.TaskCommands._load_task",
return_value={"some": "json"})
@mock.patch("rally.cli.commands.task.api.Task.validate",
return_value=fakes.FakeTask(some="json", uuid="some_uuid",
temporary=True))
@mock.patch("rally.cli.commands.task.api.Task.start")
def test_start(self, mock_task_start, mock_task_validate, mock__load_task,
mock_detailed, mock_use, mock_task_create,
mock_os_path_isdir, mock_os_path_exists):
deployment_id = "e0617de9-77d1-4875-9b49-9d5789e29f20"
task_path = "path_to_config.json"
self.task.start(task_path, deployment_id, do_use=True)
mock_task_create.assert_called_once_with(
deployment_id, None)
mock_task_start.assert_called_once_with(
deployment_id, mock__load_task.return_value,
task=mock_task_validate.return_value, abort_on_sla_failure=False)
mock__load_task.assert_called_once_with(task_path, None, None)
mock_use.assert_called_once_with("some_new_uuid")
mock_detailed.assert_called_once_with(task_id="some_new_uuid")
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.os.path.isdir", return_value=False)
@mock.patch("rally.cli.commands.task.api.Task.create",
return_value=fakes.FakeTask(uuid="new_uuid", tag="some_tag"))
@mock.patch("rally.cli.commands.task.TaskCommands.detailed")
@mock.patch("rally.cli.commands.task.api.Task.start")
@mock.patch("rally.cli.commands.task.TaskCommands._load_task",
return_value="some_config")
@mock.patch("rally.cli.commands.task.api.Task.validate",
return_value=fakes.FakeTask(uuid="some_id"))
def test_start_with_task_args(self, mock_task_validate, mock__load_task,
mock_task_start, mock_detailed,
mock_task_create, mock_os_path_isdir,
mock_os_path_exists):
task_path = mock.MagicMock()
task_args = mock.MagicMock()
task_args_file = mock.MagicMock()
self.task.start(task_path, deployment="any", task_args=task_args,
task_args_file=task_args_file, tag="some_tag")
mock__load_task.assert_called_once_with(task_path, task_args,
task_args_file)
mock_task_validate.assert_called_once_with(
"any", mock__load_task.return_value, {})
mock_task_start.assert_called_once_with(
"any", mock__load_task.return_value,
task=mock_task_create.return_value, abort_on_sla_failure=False)
mock_detailed.assert_called_once_with(
task_id=mock_task_create.return_value["uuid"])
mock_task_create.assert_called_once_with("any", "some_tag")
@mock.patch("rally.cli.commands.task.envutils.get_global")
def test_start_no_deployment_id(self, mock_get_global):
mock_get_global.side_effect = exceptions.InvalidArgumentsException
self.assertRaises(exceptions.InvalidArgumentsException,
self.task.start, "path_to_config.json", None)
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.os.path.isdir", return_value=False)
@mock.patch("rally.cli.commands.task.api.Task.create",
return_value=fakes.FakeTask(temporary=False, tag="tag",
uuid="uuid"))
@mock.patch("rally.cli.commands.task.TaskCommands._load_task",
return_value={"some": "json"})
@mock.patch("rally.cli.commands.task.api.Task.validate")
@mock.patch("rally.cli.commands.task.api.Task.start",
side_effect=exceptions.InvalidTaskException)
def test_start_invalid_task(self, mock_task_start, mock_task_validate,
mock__load_task, mock_task_create,
mock_os_path_isdir, mock_os_path_exists):
result = self.task.start("task_path", "deployment", tag="tag")
self.assertEqual(1, result)
mock_task_create.assert_called_once_with("deployment", "tag")
mock_task_start.assert_called_once_with(
"deployment", mock__load_task.return_value,
task=mock_task_create.return_value, abort_on_sla_failure=False)
@mock.patch("rally.cli.commands.task.api")
def test_abort(self, mock_api):
test_uuid = "17860c43-2274-498d-8669-448eff7b073f"
mock_api.Task.abort = mock.MagicMock()
self.task.abort(test_uuid)
mock_api.Task.abort.assert_called_once_with(test_uuid, False,
async=False)
@mock.patch("rally.cli.commands.task.envutils.get_global")
def test_abort_no_task_id(self, mock_get_global):
mock_get_global.side_effect = exceptions.InvalidArgumentsException
self.assertRaises(exceptions.InvalidArgumentsException,
self.task.abort, None)
def test_status(self):
test_uuid = "a3e7cefb-bec2-4802-89f6-410cc31f71af"
value = {"task_id": "task", "status": "status"}
with mock.patch("rally.cli.commands.task.db") as mock_db:
mock_db.task_get = mock.MagicMock(return_value=value)
self.task.status(test_uuid)
mock_db.task_get.assert_called_once_with(test_uuid)
@mock.patch("rally.cli.commands.task.envutils.get_global")
def test_status_no_task_id(self, mock_get_global):
mock_get_global.side_effect = exceptions.InvalidArgumentsException
self.assertRaises(exceptions.InvalidArgumentsException,
self.task.status, None)
@mock.patch("rally.cli.commands.task.db")
def test_detailed(self, mock_db):
test_uuid = "c0d874d4-7195-4fd5-8688-abe82bfad36f"
value = {
"id": "task",
"uuid": test_uuid,
"status": "status",
"results": [
{
"key": {
"name": "fake_name",
"pos": "fake_pos",
"kw": "fake_kw"
},
"data": {
"load_duration": 1.0,
"full_duration": 2.0,
"raw": [
{
"duration": 0.9,
"idle_duration": 0.5,
"scenario_output": {
"data": {
"a": 3
},
"errors": "some"
},
"atomic_actions": {
"a": 0.6,
"b": 0.7
},
"error": ["type", "message", "traceback"]
},
{
"duration": 0.5,
"idle_duration": 0.2,
"scenario_output": {
"data": {
"a": 1
},
"errors": "some"
},
"atomic_actions": {
"a": 0.2,
"b": 0.4
},
"error": None
},
{
"duration": 0.6,
"idle_duration": 0.4,
"scenario_output": {
"data": {
"a": 2
},
"errors": None
},
"atomic_actions": {
"a": 0.3,
"b": 0.5
},
"error": None
}
]
}
}
]
}
mock_db.task_get_detailed = mock.MagicMock(return_value=value)
self.task.detailed(test_uuid)
mock_db.task_get_detailed.assert_called_once_with(test_uuid)
self.task.detailed(test_uuid, iterations_data=True)
@mock.patch("rally.cli.commands.task.db")
@mock.patch("rally.cli.commands.task.logging")
def test_detailed_task_failed(self, mock_logging, mock_db):
value = {
"id": "task",
"uuid": "task_uuid",
"status": consts.TaskStatus.FAILED,
"results": [],
"verification_log": "['1', '2', '3']"
}
mock_db.task_get_detailed = mock.MagicMock(return_value=value)
mock_logging.is_debug.return_value = False
self.task.detailed("task_uuid")
mock_logging.is_debug.return_value = True
self.task.detailed("task_uuid")
@mock.patch("rally.cli.commands.task.envutils.get_global")
def test_detailed_no_task_id(self, mock_get_global):
mock_get_global.side_effect = exceptions.InvalidArgumentsException
self.assertRaises(exceptions.InvalidArgumentsException,
self.task.detailed, None)
@mock.patch("rally.cli.commands.task.db")
def test_detailed_wrong_id(self, mock_db):
test_uuid = "eb290c30-38d8-4c8f-bbcc-fc8f74b004ae"
mock_db.task_get_detailed = mock.MagicMock(return_value=None)
self.task.detailed(test_uuid)
mock_db.task_get_detailed.assert_called_once_with(test_uuid)
@mock.patch("json.dumps")
@mock.patch("rally.cli.commands.task.objects.Task.get")
def test_results(self, mock_task_get, mock_json_dumps):
task_id = "foo_task_id"
data = [
{"key": "foo_key", "data": {"raw": "foo_raw", "sla": [],
"load_duration": "lo_duration",
"full_duration": "fu_duration"}}
]
result = map(lambda x: {"key": x["key"],
"result": x["data"]["raw"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"],
"sla": x["data"]["sla"]}, data)
mock_results = mock.Mock(return_value=data)
mock_task_get.return_value = mock.Mock(get_results=mock_results)
self.task.results(task_id)
self.assertEqual(1, mock_json_dumps.call_count)
self.assertEqual(1, len(mock_json_dumps.call_args[0]))
self.assertSequenceEqual(result, mock_json_dumps.call_args[0][0])
self.assertEqual({"sort_keys": True, "indent": 4},
mock_json_dumps.call_args[1])
mock_task_get.assert_called_once_with(task_id)
@mock.patch("rally.cli.commands.task.sys.stdout")
@mock.patch("rally.cli.commands.task.objects.Task.get")
def test_results_no_data(self, mock_task_get, mock_stdout):
task_id = "foo_task_id"
mock_results = mock.Mock(return_value=[])
mock_task_get.return_value = mock.Mock(get_results=mock_results)
result = self.task.results(task_id)
mock_task_get.assert_called_once_with(task_id)
self.assertEqual(1, result)
expected_out = ("The task %s marked as '%s'. Results "
"available when it is '%s' .") % (
task_id, consts.TaskStatus.FAILED, consts.TaskStatus.FINISHED)
mock_stdout.write.assert_has_calls([mock.call(expected_out)])
@mock.patch("rally.cli.commands.task.jsonschema.validate",
return_value=None)
@mock.patch("rally.cli.commands.task.os.path.realpath",
side_effect=lambda p: "realpath_%s" % p)
@mock.patch("rally.cli.commands.task.open",
side_effect=mock.mock_open(), create=True)
@mock.patch("rally.cli.commands.task.plot")
@mock.patch("rally.cli.commands.task.webbrowser")
@mock.patch("rally.cli.commands.task.objects.Task.get")
def test_report_one_uuid(self, mock_task_get, mock_webbrowser,
mock_plot, mock_open, mock_realpath,
mock_validate):
task_id = "eb290c30-38d8-4c8f-bbcc-fc8f74b004ae"
data = [
{"key": {"name": "class.test", "pos": 0},
"data": {"raw": "foo_raw", "sla": "foo_sla",
"load_duration": 0.1,
"full_duration": 1.2}},
{"key": {"name": "class.test", "pos": 0},
"data": {"raw": "bar_raw", "sla": "bar_sla",
"load_duration": 2.1,
"full_duration": 2.2}}]
results = [{"key": x["key"],
"result": x["data"]["raw"],
"sla": x["data"]["sla"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"]}
for x in data]
mock_results = mock.Mock(return_value=data)
mock_task_get.return_value = mock.Mock(get_results=mock_results)
mock_plot.plot.return_value = "html_report"
def reset_mocks():
for m in mock_task_get, mock_webbrowser, mock_plot, mock_open:
m.reset_mock()
self.task.report(tasks=task_id, out="/tmp/%s.html" % task_id)
mock_open.assert_called_once_with("/tmp/%s.html" % task_id, "w+")
mock_plot.plot.assert_called_once_with(results)
mock_open.side_effect().write.assert_called_once_with("html_report")
mock_task_get.assert_called_once_with(task_id)
reset_mocks()
self.task.report(tasks=task_id, out="/tmp/%s.html" % task_id,
out_format="junit")
mock_open.assert_called_once_with("/tmp/%s.html" % task_id, "w+")
reset_mocks()
self.task.report(task_id, out="spam.html", open_it=True)
mock_webbrowser.open_new_tab.assert_called_once_with(
"file://realpath_spam.html")
@mock.patch("rally.cli.commands.task.jsonschema.validate",
return_value=None)
@mock.patch("rally.cli.commands.task.os.path.realpath",
side_effect=lambda p: "realpath_%s" % p)
@mock.patch("rally.cli.commands.task.open",
side_effect=mock.mock_open(), create=True)
@mock.patch("rally.cli.commands.task.plot")
@mock.patch("rally.cli.commands.task.webbrowser")
@mock.patch("rally.cli.commands.task.objects.Task.get")
def test_report_bunch_uuids(self, mock_task_get, mock_webbrowser,
mock_plot, mock_open, mock_realpath,
mock_validate):
tasks = ["eb290c30-38d8-4c8f-bbcc-fc8f74b004ae",
"eb290c30-38d8-4c8f-bbcc-fc8f74b004af"]
data = [
{"key": {"name": "test", "pos": 0},
"data": {"raw": "foo_raw", "sla": "foo_sla",
"load_duration": 0.1,
"full_duration": 1.2}},
{"key": {"name": "test", "pos": 0},
"data": {"raw": "bar_raw", "sla": "bar_sla",
"load_duration": 2.1,
"full_duration": 2.2}}]
results = []
for task_uuid in tasks:
results.extend(
map(lambda x: {"key": x["key"],
"result": x["data"]["raw"],
"sla": x["data"]["sla"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"]},
data))
mock_results = mock.Mock(return_value=data)
mock_task_get.return_value = mock.Mock(get_results=mock_results)
mock_plot.plot.return_value = "html_report"
def reset_mocks():
for m in mock_task_get, mock_webbrowser, mock_plot, mock_open:
m.reset_mock()
self.task.report(tasks=tasks, out="/tmp/1_test.html")
mock_open.assert_called_once_with("/tmp/1_test.html", "w+")
mock_plot.plot.assert_called_once_with(results)
mock_open.side_effect().write.assert_called_once_with("html_report")
expected_get_calls = [mock.call(task) for task in tasks]
mock_task_get.assert_has_calls(expected_get_calls, any_order=True)
@mock.patch("rally.cli.commands.task.json.load")
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.jsonschema.validate",
return_value=None)
@mock.patch("rally.cli.commands.task.os.path.realpath",
side_effect=lambda p: "realpath_%s" % p)
@mock.patch("rally.cli.commands.task.open", create=True)
@mock.patch("rally.cli.commands.task.plot")
def test_report_one_file(self, mock_plot, mock_open, mock_realpath,
mock_validate, mock_path_exists, mock_json_load):
task_file = "/tmp/some_file.json"
data = [
{"key": {"name": "test", "pos": 0},
"data": {"raw": "foo_raw", "sla": "foo_sla",
"load_duration": 0.1,
"full_duration": 1.2}},
{"key": {"name": "test", "pos": 1},
"data": {"raw": "bar_raw", "sla": "bar_sla",
"load_duration": 2.1,
"full_duration": 2.2}}]
results = [{"key": x["key"],
"result": x["data"]["raw"],
"sla": x["data"]["sla"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"]}
for x in data]
mock_plot.plot.return_value = "html_report"
mock_open.side_effect = mock.mock_open(read_data=results)
mock_json_load.return_value = results
def reset_mocks():
for m in mock_plot, mock_open, mock_json_load, mock_validate:
m.reset_mock()
self.task.report(tasks=task_file, out="/tmp/1_test.html")
expected_open_calls = [mock.call(task_file, "r"),
mock.call("/tmp/1_test.html", "w+")]
mock_open.assert_has_calls(expected_open_calls, any_order=True)
mock_plot.plot.assert_called_once_with(results)
mock_open.side_effect().write.assert_called_once_with("html_report")
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.json.load")
@mock.patch("rally.cli.commands.task.open", create=True)
def test_report_exceptions(self, mock_open, mock_json_load,
mock_path_exists):
results = [
{"key": {"name": "test", "pos": 0},
"data": {"raw": "foo_raw", "sla": "foo_sla",
"load_duration": 0.1,
"full_duration": 1.2}}]
mock_open.side_effect = mock.mock_open(read_data=results)
mock_json_load.return_value = results
ret = self.task.report(tasks="/tmp/task.json",
out="/tmp/tmp.hsml")
self.assertEqual(ret, 1)
for m in mock_open, mock_json_load:
m.reset_mock()
mock_path_exists.return_value = False
ret = self.task.report(tasks="/tmp/task.json",
out="/tmp/tmp.hsml")
self.assertEqual(ret, 1)
@mock.patch("rally.cli.commands.task.sys.stderr")
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.json.load")
@mock.patch("rally.cli.commands.task.open", create=True)
def test_report_invalid_format(self, mock_open, mock_json_load,
mock_path_exists, mock_stderr):
result = self.task.report(tasks="/tmp/task.json", out="/tmp/tmp.html",
out_format="invalid")
self.assertEqual(1, result)
expected_out = "Invalid output format: invalid"
mock_stderr.write.assert_has_calls([mock.call(expected_out)])
@mock.patch("rally.cli.commands.task.cliutils.print_list")
@mock.patch("rally.cli.commands.task.envutils.get_global",
return_value="123456789")
@mock.patch("rally.cli.commands.task.objects.Task.list",
return_value=[fakes.FakeTask(uuid="a",
created_at=date.datetime.now(),
updated_at=date.datetime.now(),
status="c",
tag="d",
deployment_name="some_name")])
def test_list(self, mock_task_list, mock_get_global, mock_print_list):
self.task.list(status="running")
mock_task_list.assert_called_once_with(
deployment=mock_get_global.return_value,
status=consts.TaskStatus.RUNNING)
headers = ["uuid", "deployment_name", "created_at", "duration",
"status", "tag"]
mock_print_list.assert_called_once_with(
mock_task_list.return_value, headers,
sortby_index=headers.index("created_at"))
@mock.patch("rally.cli.commands.task.cliutils.print_list")
@mock.patch("rally.cli.commands.task.envutils.get_global",
return_value="123456789")
@mock.patch("rally.cli.commands.task.objects.Task.list",
return_value=[fakes.FakeTask(uuid="a",
created_at=date.datetime.now(),
updated_at=date.datetime.now(),
status="c",
tag="d",
deployment_name="some_name")])
def test_list_uuids_only(self, mock_task_list, mock_get_global,
mock_print_list):
self.task.list(status="running", uuids_only=True)
mock_task_list.assert_called_once_with(
deployment=mock_get_global.return_value,
status=consts.TaskStatus.RUNNING)
mock_print_list.assert_called_once_with(
mock_task_list.return_value, ["uuid"],
print_header=False, print_border=False)
def test_list_wrong_status(self):
self.assertEqual(1, self.task.list(deployment="fake",
status="wrong non existing status"))
@mock.patch("rally.cli.commands.task.objects.Task.list", return_value=[])
def test_list_no_results(self, mock_task_list):
self.assertIsNone(
self.task.list(deployment="fake", all_deployments=True))
mock_task_list.assert_called_once_with()
mock_task_list.reset_mock()
self.assertIsNone(
self.task.list(deployment="d", status=consts.TaskStatus.RUNNING)
)
mock_task_list.assert_called_once_with(
deployment="d", status=consts.TaskStatus.RUNNING)
def test_delete(self):
task_uuid = "8dcb9c5e-d60b-4022-8975-b5987c7833f7"
force = False
with mock.patch("rally.cli.commands.task.api") as mock_api:
mock_api.Task.delete = mock.Mock()
self.task.delete(task_uuid, force=force)
mock_api.Task.delete.assert_called_once_with(task_uuid,
force=force)
@mock.patch("rally.cli.commands.task.api")
def test_delete_multiple_uuid(self, mock_api):
task_uuids = ["4bf35b06-5916-484f-9547-12dce94902b7",
"52cad69d-d3e4-47e1-b445-dec9c5858fe8",
"6a3cb11c-ac75-41e7-8ae7-935732bfb48f",
"018af931-0e5a-40d5-9d6f-b13f4a3a09fc"]
force = False
self.task.delete(task_uuids, force=force)
self.assertTrue(mock_api.Task.delete.call_count == len(task_uuids))
expected_calls = [mock.call(task_uuid, force=force) for task_uuid
in task_uuids]
self.assertTrue(mock_api.Task.delete.mock_calls == expected_calls)
@mock.patch("rally.cli.commands.task.cliutils.print_list")
@mock.patch("rally.cli.commands.task.objects.Task.get")
def test_sla_check(self, mock_task_get, mock_print_list):
data = [{"key": {"name": "fake_name",
"pos": "fake_pos",
"kw": "fake_kw"},
"data": {"scenario_duration": 42.0,
"raw": [],
"sla": [{"benchmark": "KeystoneBasic.create_user",
"criterion": "max_seconds_per_iteration",
"pos": 0,
"success": False,
"detail": "Max foo, actually bar"}]}}]
mock_task_get().get_results.return_value = copy.deepcopy(data)
result = self.task.sla_check(task_id="fake_task_id")
self.assertEqual(1, result)
mock_task_get.assert_called_with("fake_task_id")
data[0]["data"]["sla"][0]["success"] = True
mock_task_get().get_results.return_value = data
result = self.task.sla_check(task_id="fake_task_id", tojson=True)
self.assertEqual(0, result)
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.api.Task.validate")
@mock.patch("rally.cli.commands.task.open",
side_effect=mock.mock_open(read_data="{\"some\": \"json\"}"),
create=True)
def test_validate(self, mock_open, mock_task_validate,
mock_os_path_exists):
self.task.validate("path_to_config.json", "fake_id")
mock_task_validate.assert_called_once_with("fake_id", {"some": "json"},
None)
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.TaskCommands._load_task",
side_effect=task.FailedToLoadTask)
def test_validate_failed_to_load_task(self, mock__load_task,
mock_os_path_exists):
args = mock.MagicMock()
args_file = mock.MagicMock()
result = self.task.validate("path_to_task", "fake_deployment_id",
task_args=args, task_args_file=args_file)
self.assertEqual(1, result)
mock__load_task.assert_called_once_with(
"path_to_task", args, args_file)
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.TaskCommands._load_task")
@mock.patch("rally.api.Task.validate")
def test_validate_invalid(self, mock_task_validate, mock__load_task,
mock_os_path_exists):
mock_task_validate.side_effect = exceptions.InvalidTaskException
result = self.task.validate("path_to_task", "deployment")
self.assertEqual(1, result)
mock_task_validate.assert_called_once_with(
"deployment", mock__load_task.return_value, None)
@mock.patch("rally.common.fileutils._rewrite_env_file")
@mock.patch("rally.cli.commands.task.db.task_get", return_value=True)
def test_use(self, mock_task_get, mock__rewrite_env_file):
task_id = "80422553-5774-44bd-98ac-38bd8c7a0feb"
self.task.use(task_id)
mock__rewrite_env_file.assert_called_once_with(
os.path.expanduser("~/.rally/globals"),
["RALLY_TASK=%s\n" % task_id])
@mock.patch("rally.cli.commands.task.db.task_get")
def test_use_not_found(self, mock_task_get):
task_id = "ddc3f8ba-082a-496d-b18f-72cdf5c10a14"
mock_task_get.side_effect = exceptions.TaskNotFound(uuid=task_id)
self.assertRaises(exceptions.TaskNotFound, self.task.use, task_id)
| apache-2.0 | -2,379,006,896,939,867,600 | 46.104139 | 79 | 0.541283 | false |
sandyjmacdonald/dots_for_microarrays | dots_scripts/dots_workflow.py | 1 | 1929 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import glob
import argparse
from dots_backend.dots_plotting import do_boxplot, do_pcaplot, do_volcanoplot, do_heatmap, do_clusters_plot
from dots_backend.dots_arrays import read_experiment
from dots_backend.dots_analysis import get_fold_changes, write_fcs_stats, write_normalised_expression
## Set up argparse arguments
parser = argparse.ArgumentParser()
parser.add_argument('input', help='input folder, e.g. arrays')
parser.add_argument('-o', '--output', help='name of output folder')
args = parser.parse_args()
## Set up output folder.
if args.output:
outfolder = args.output if args.output.endswith('/') else args.output + '/'
else:
outfolder = 'output/'
if not os.path.isdir(outfolder):
os.makedirs(outfolder)
## Read in files and create experiment.
array_filenames = glob.glob(args.input + '*.txt' if args.input.endswith('/') else args.input + '/*.txt')
experiment = read_experiment(array_filenames)
experiment = experiment.baseline_to_median()
## Write tables.
write_fcs_stats(experiment, outfile=outfolder + 'foldchanges_stats.txt')
write_normalised_expression(experiment, outfile=outfolder + 'normalised_expression.txt')
## Do plots.
do_boxplot(experiment, show=False, image=True, html_file=outfolder + 'boxplot.html')
do_pcaplot(experiment, show=False, image=True, html_file=outfolder + 'pcaplot.html')
do_heatmap(experiment, show=False, image=True, html_file=outfolder + 'heatmap.html')
do_clusters_plot(experiment, show=False, image=True, html_file=outfolder + 'clustersplot.html')
## Get fold change columns for volcano plots.
fcs = get_fold_changes(experiment)
fc_cols = [x for x in fcs.columns.values if 'logFC' in x]
## For each pair of groups, create a volcano plot.
for col in fc_cols:
pair = col[6:]
groups = tuple(pair.split('_'))
do_volcanoplot(experiment, groups, show=False, image=True, html_file=outfolder + pair + '_volcanoplot.html') | mit | 7,454,683,969,489,343,000 | 38.387755 | 109 | 0.743909 | false |
abhidrona/gn-osc-custom | oscar/apps/catalogue/migrations/0011_auto__chg_field_productimage_original__chg_field_category_image.py | 18 | 13475 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'ProductImage.original'
db.alter_column(u'catalogue_productimage', 'original', self.gf('django.db.models.fields.files.ImageField')(max_length=255))
# Changing field 'Category.image'
db.alter_column(u'catalogue_category', 'image', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True))
def backwards(self, orm):
# Changing field 'ProductImage.original'
db.alter_column(u'catalogue_productimage', 'original', self.gf('django.db.models.fields.files.ImageField')(max_length=100))
# Changing field 'Category.image'
db.alter_column(u'catalogue_category', 'image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
models = {
u'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': u"orm['catalogue.AttributeEntityType']"})
},
u'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
u'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': u"orm['catalogue.AttributeOptionGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'catalogue.contributor': {
'Meta': {'object_name': 'Contributor'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'catalogue.contributorrole': {
'Meta': {'object_name': 'ContributorRole'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
u'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.ProductAttribute']", 'through': u"orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Category']", 'through': u"orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': u"orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Product']", 'symmetrical': 'False', 'through': u"orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
u'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ProductAttribute']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': u"orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'catalogue.productcontributor': {
'Meta': {'object_name': 'ProductContributor'},
'contributor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Contributor']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ContributorRole']", 'null': 'True', 'blank': 'True'})
},
u'catalogue.productimage': {
'Meta': {'ordering': "['display_order']", 'unique_together': "(('product', 'display_order'),)", 'object_name': 'ProductImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.files.ImageField', [], {'max_length': '255'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': u"orm['catalogue.Product']"})
},
u'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': u"orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
}
}
complete_apps = ['catalogue'] | bsd-3-clause | -7,592,863,747,906,435,000 | 78.270588 | 224 | 0.569425 | false |
bbbenja/SickRage | lib/hachoir_parser/archive/rpm.py | 95 | 8568 | """
RPM archive parser.
Author: Victor Stinner, 1st December 2005.
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, UInt64, Enum,
NullBytes, Bytes, RawBytes, SubFile,
Character, CString, String)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_parser.archive.gzip_parser import GzipParser
from hachoir_parser.archive.bzip2_parser import Bzip2Parser
class ItemContent(FieldSet):
format_type = {
0: UInt8,
1: Character,
2: UInt8,
3: UInt16,
4: UInt32,
5: UInt64,
6: CString,
7: RawBytes,
8: CString,
9: CString
}
def __init__(self, parent, name, item):
FieldSet.__init__(self, parent, name, item.description)
self.related_item = item
self._name = "content_%s" % item.name
def createFields(self):
item = self.related_item
type = item["type"].value
cls = self.format_type[type]
count = item["count"].value
if cls is RawBytes: # or type == 8:
if cls is RawBytes:
args = (self, "value", count)
else:
args = (self, "value") # cls is CString
count = 1
else:
if 1 < count:
args = (self, "value[]")
else:
args = (self, "value")
for index in xrange(count):
yield cls(*args)
class Item(FieldSet):
type_name = {
0: "NULL",
1: "CHAR",
2: "INT8",
3: "INT16",
4: "INT32",
5: "INT64",
6: "CSTRING",
7: "BIN",
8: "CSTRING_ARRAY",
9: "CSTRING?"
}
tag_name = {
1000: "File size",
1001: "(Broken) MD5 signature",
1002: "PGP 2.6.3 signature",
1003: "(Broken) MD5 signature",
1004: "MD5 signature",
1005: "GnuPG signature",
1006: "PGP5 signature",
1007: "Uncompressed payload size (bytes)",
256+8: "Broken SHA1 header digest",
256+9: "Broken SHA1 header digest",
256+13: "Broken SHA1 header digest",
256+11: "DSA header signature",
256+12: "RSA header signature"
}
def __init__(self, parent, name, description=None, tag_name_dict=None):
FieldSet.__init__(self, parent, name, description)
if tag_name_dict is None:
tag_name_dict = Item.tag_name
self.tag_name_dict = tag_name_dict
def createFields(self):
yield Enum(UInt32(self, "tag", "Tag"), self.tag_name_dict)
yield Enum(UInt32(self, "type", "Type"), Item.type_name)
yield UInt32(self, "offset", "Offset")
yield UInt32(self, "count", "Count")
def createDescription(self):
return "Item: %s (%s)" % (self["tag"].display, self["type"].display)
class ItemHeader(Item):
tag_name = {
61: "Current image",
62: "Signatures",
63: "Immutable",
64: "Regions",
100: "I18N string locales",
1000: "Name",
1001: "Version",
1002: "Release",
1003: "Epoch",
1004: "Summary",
1005: "Description",
1006: "Build time",
1007: "Build host",
1008: "Install time",
1009: "Size",
1010: "Distribution",
1011: "Vendor",
1012: "Gif",
1013: "Xpm",
1014: "Licence",
1015: "Packager",
1016: "Group",
1017: "Changelog",
1018: "Source",
1019: "Patch",
1020: "Url",
1021: "OS",
1022: "Arch",
1023: "Prein",
1024: "Postin",
1025: "Preun",
1026: "Postun",
1027: "Old filenames",
1028: "File sizes",
1029: "File states",
1030: "File modes",
1031: "File uids",
1032: "File gids",
1033: "File rdevs",
1034: "File mtimes",
1035: "File MD5s",
1036: "File link to's",
1037: "File flags",
1038: "Root",
1039: "File username",
1040: "File groupname",
1043: "Icon",
1044: "Source rpm",
1045: "File verify flags",
1046: "Archive size",
1047: "Provide name",
1048: "Require flags",
1049: "Require name",
1050: "Require version",
1051: "No source",
1052: "No patch",
1053: "Conflict flags",
1054: "Conflict name",
1055: "Conflict version",
1056: "Default prefix",
1057: "Build root",
1058: "Install prefix",
1059: "Exclude arch",
1060: "Exclude OS",
1061: "Exclusive arch",
1062: "Exclusive OS",
1064: "RPM version",
1065: "Trigger scripts",
1066: "Trigger name",
1067: "Trigger version",
1068: "Trigger flags",
1069: "Trigger index",
1079: "Verify script",
#TODO: Finish the list (id 1070..1162 using rpm library source code)
}
def __init__(self, parent, name, description=None):
Item.__init__(self, parent, name, description, self.tag_name)
def sortRpmItem(a,b):
return int( a["offset"].value - b["offset"].value )
class PropertySet(FieldSet):
def __init__(self, parent, name, *args):
FieldSet.__init__(self, parent, name, *args)
self._size = self["content_item[1]"].address + self["size"].value * 8
def createFields(self):
# Read chunk header
yield Bytes(self, "signature", 3, r"Property signature (\x8E\xAD\xE8)")
if self["signature"].value != "\x8E\xAD\xE8":
raise ParserError("Invalid property signature")
yield UInt8(self, "version", "Signature version")
yield NullBytes(self, "reserved", 4, "Reserved")
yield UInt32(self, "count", "Count")
yield UInt32(self, "size", "Size")
# Read item header
items = []
for i in range(0, self["count"].value):
item = ItemHeader(self, "item[]")
yield item
items.append(item)
# Sort items by their offset
items.sort( sortRpmItem )
# Read item content
start = self.current_size/8
for item in items:
offset = item["offset"].value
diff = offset - (self.current_size/8 - start)
if 0 < diff:
yield NullBytes(self, "padding[]", diff)
yield ItemContent(self, "content[]", item)
size = start + self["size"].value - self.current_size/8
if 0 < size:
yield NullBytes(self, "padding[]", size)
class RpmFile(Parser):
PARSER_TAGS = {
"id": "rpm",
"category": "archive",
"file_ext": ("rpm",),
"mime": (u"application/x-rpm",),
"min_size": (96 + 16 + 16)*8, # file header + checksum + content header
"magic": (('\xED\xAB\xEE\xDB', 0),),
"description": "RPM package"
}
TYPE_NAME = {
0: "Binary",
1: "Source"
}
endian = BIG_ENDIAN
def validate(self):
if self["signature"].value != '\xED\xAB\xEE\xDB':
return "Invalid signature"
if self["major_ver"].value != 3:
return "Unknown major version (%u)" % self["major_ver"].value
if self["type"].value not in self.TYPE_NAME:
return "Invalid RPM type"
return True
def createFields(self):
yield Bytes(self, "signature", 4, r"RPM file signature (\xED\xAB\xEE\xDB)")
yield UInt8(self, "major_ver", "Major version")
yield UInt8(self, "minor_ver", "Minor version")
yield Enum(UInt16(self, "type", "RPM type"), RpmFile.TYPE_NAME)
yield UInt16(self, "architecture", "Architecture")
yield String(self, "name", 66, "Archive name", strip="\0", charset="ASCII")
yield UInt16(self, "os", "OS")
yield UInt16(self, "signature_type", "Type of signature")
yield NullBytes(self, "reserved", 16, "Reserved")
yield PropertySet(self, "checksum", "Checksum (signature)")
yield PropertySet(self, "header", "Header")
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError
size = (self._size - self.current_size) // 8
if size:
if 3 <= size and self.stream.readBytes(self.current_size, 3) == "BZh":
yield SubFile(self, "content", size, "bzip2 content", parser=Bzip2Parser)
else:
yield SubFile(self, "content", size, "gzip content", parser=GzipParser)
| gpl-3.0 | -4,038,099,798,579,749,000 | 31.089888 | 89 | 0.537815 | false |
heiden-deng/anaconda | pyanaconda/ui/gui/spokes/advstorage/zfcp.py | 8 | 5845 | # zFCP configuration dialog
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Samantha N. Bueno <[email protected]>
#
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.utils import gtk_action_nowait
from blivet.zfcp import ZFCPDevice
__all__ = ["ZFCPDialog"]
class ZFCPDialog(GUIObject):
""" Gtk dialog which allows users to manually add zFCP devices without
having previously specified them in a parm file.
"""
builderObjects = ["zfcpDialog"]
mainWidgetName = "zfcpDialog"
uiFile = "spokes/advstorage/zfcp.glade"
def __init__(self, data, storage):
GUIObject.__init__(self, data)
self.storage = storage
self.zfcp = self.storage.zfcp()
self._discoveryError = None
self._update_devicetree = False
# grab all of the ui objects
self._zfcpNotebook = self.builder.get_object("zfcpNotebook")
self._configureGrid = self.builder.get_object("configureGrid")
self._conditionNotebook = self.builder.get_object("conditionNotebook")
self._startButton = self.builder.get_object("startButton")
self._okButton = self.builder.get_object("okButton")
self._cancelButton = self.builder.get_object("cancelButton")
self._deviceEntry = self.builder.get_object("deviceEntry")
self._wwpnEntry = self.builder.get_object("wwpnEntry")
self._lunEntry = self.builder.get_object("lunEntry")
def refresh(self):
self._deviceEntry.set_text("")
self._deviceEntry.set_sensitive(True)
self._startButton.set_sensitive(True)
def run(self):
rc = self.window.run()
self.window.destroy()
# We need to call this to get the device nodes to show up
# in our devicetree.
if self._update_devicetree:
self.storage.devicetree.populate()
return rc
def _set_configure_sensitive(self, sensitivity):
""" Set entries to a given sensitivity. """
for child in self._configureGrid.get_children():
child.set_sensitive(sensitivity)
def on_start_clicked(self, *args):
""" Go through the process of validating entry contents and then
attempt to add the device.
"""
# First update widgets
self._startButton.hide()
self._cancelButton.set_sensitive(False)
self._okButton.set_sensitive(False)
self._conditionNotebook.set_current_page(1)
self._set_configure_sensitive(False)
self._deviceEntry.set_sensitive(False)
# Make a zFCP object with some dummy credentials so we can validate our
# actual input
self._conditionNotebook.set_current_page(1)
dev = ZFCPDevice("0.0.0000", "0x0000000000000000", "0x0000000000000000")
# below really, really is ugly and needs to be re-factored, but this
# should give a good base idea as far as expected behavior should go
try:
device = dev.sanitizeDeviceInput(self._deviceEntry.get_text())
wwpn = dev.sanitizeWWPNInput(self._wwpnEntry.get_text())
lun = dev.sanitizeFCPLInput(self._lunEntry.get_text())
except ValueError as e:
_config_error = str(e)
self.builder.get_object("deviceErrorLabel").set_text(_config_error)
self._conditionNotebook.set_current_page(2)
spinner = self.builder.get_object("waitSpinner")
spinner.start()
self._discover(device, wwpn, lun)
self._check_discover()
@gtk_action_nowait
def _check_discover(self, *args):
""" After the zFCP discover thread runs, check to see whether a valid
device was discovered. Display an error message if not.
"""
spinner = self.builder.get_object("waitSpinner")
spinner.stop()
if self._discoveryError:
# Failure, display a message and leave the user on the dialog so
# they can try again (or cancel)
self.builder.get_object("deviceErrorLabel").set_text(self._discoveryError)
self._discoveryError = None
self._conditionNotebook.set_current_page(2)
self._set_configure_sensitive(True)
else:
# Great success. Just return to the advanced storage window and let the
# UI update with the newly-added device
self.window.response(1)
return True
self._cancelButton.set_sensitive(True)
return False
def _discover(self, *args):
""" Given the configuration options from a user, attempt to discover
a zFCP device. This includes searching black-listed devices.
"""
# attempt to add the device
try:
self.zfcp.addFCP(args[0], args[1], args[2])
self._update_devicetree = True
except ValueError as e:
self._discoveryError = str(e)
return
| gpl-2.0 | -5,419,727,511,572,498,000 | 38.493243 | 86 | 0.656287 | false |
selahssea/ggrc-core | src/ggrc/models/categorization.py | 7 | 3037 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc import db
from sqlalchemy.ext.associationproxy import association_proxy
from .mixins import Base
BACKREF_NAME_FORMAT = '{type}_{scope}_categorizable'
class Categorization(Base, db.Model):
__tablename__ = 'categorizations'
category_id = db.Column(
db.Integer, db.ForeignKey('categories.id'), nullable=False)
category_type = db.Column(db.String)
categorizable_id = db.Column(db.Integer)
categorizable_type = db.Column(db.String)
@property
def category_attr(self):
return '{0}_category'.format(self.category_type)
@property
def category(self):
return getattr(self, self.category_attr)
@category.setter
def category(self, value):
self.category_id = value.id if value is not None else None
self.category_type = value.__class__.__name__ if value is not None \
else None
return setattr(self, self.category_attr, value)
_publish_attrs = [
# 'categorizable',
'category',
]
_update_attrs = []
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Categorization, cls).eager_query()
return query.options(
orm.subqueryload('category'))
class Categorizable(object):
"""Subclasses **MUST** provide a declared_attr method that defines the
relationship and association_proxy. For example:
.. code-block:: python
@declared_attr
def control_categorizations(cls):
return cls.categorizations(
'control_categorizations',
'control_categories',
100,
)
"""
@classmethod
def declare_categorizable(cls, category_type, single, plural, ation):
setattr(
cls, plural,
association_proxy(
ation, 'category',
creator=lambda category: Categorization(
category_id=category.id,
category_type=category.__class__.__name__,
categorizable_type=cls.__name__
)
)
)
joinstr = (
'and_('
'foreign(Categorization.categorizable_id) == {type}.id, '
'foreign(Categorization.categorizable_type) == "{type}", '
'foreign(Categorization.category_type) == "{category_type}"'
')'
)
joinstr = joinstr.format(type=cls.__name__, category_type=category_type)
backref = '{type}_categorizable_{category_type}'.format(
type=cls.__name__,
category_type=category_type,
)
return db.relationship(
'Categorization',
primaryjoin=joinstr,
backref=backref,
cascade='all, delete-orphan',
)
@classmethod
def _filter_by_category(cls, category_type, predicate):
from ggrc.models.category import CategoryBase
return Categorization.query.join(CategoryBase).filter(
(Categorization.categorizable_type == cls.__name__) &
(Categorization.categorizable_id == cls.id) &
predicate(CategoryBase.name)
).exists()
| apache-2.0 | -9,022,900,897,160,025,000 | 28.201923 | 78 | 0.641752 | false |
mjs/postfix-charm | lib/charmhelpers/core/services/helpers.py | 8 | 9736 | # Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core import templating
from charmhelpers.core.services.base import ManagerCallback
__all__ = ['RelationContext', 'TemplateCallback',
'render_template', 'template']
class RelationContext(dict):
"""
Base class for a context generator that gets relation data from juju.
Subclasses must provide the attributes `name`, which is the name of the
interface of interest, `interface`, which is the type of the interface of
interest, and `required_keys`, which is the set of keys required for the
relation to be considered complete. The data for all interfaces matching
the `name` attribute that are complete will used to populate the dictionary
values (see `get_data`, below).
The generated context will be namespaced under the relation :attr:`name`,
to prevent potential naming conflicts.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = None
interface = None
def __init__(self, name=None, additional_required_keys=None):
if not hasattr(self, 'required_keys'):
self.required_keys = []
if name is not None:
self.name = name
if additional_required_keys:
self.required_keys.extend(additional_required_keys)
self.get_data()
def __bool__(self):
"""
Returns True if all of the required_keys are available.
"""
return self.is_ready()
__nonzero__ = __bool__
def __repr__(self):
return super(RelationContext, self).__repr__()
def is_ready(self):
"""
Returns True if all of the `required_keys` are available from any units.
"""
ready = len(self.get(self.name, [])) > 0
if not ready:
hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
return ready
def _is_ready(self, unit_data):
"""
Helper method that tests a set of relation data and returns True if
all of the `required_keys` are present.
"""
return set(unit_data.keys()).issuperset(set(self.required_keys))
def get_data(self):
"""
Retrieve the relation data for each unit involved in a relation and,
if complete, store it in a list under `self[self.name]`. This
is automatically called when the RelationContext is instantiated.
        The units are sorted lexicographically first by the service ID, then by
the unit ID. Thus, if an interface has two other services, 'db:1'
and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
set of data, the relation data for the units will be stored in the
order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
If you only care about a single unit on the relation, you can just
access it as `{{ interface[0]['key'] }}`. However, if you can at all
support multiple units on a relation, you should iterate over the list,
like::
{% for unit in interface -%}
{{ unit['key'] }}{% if not loop.last %},{% endif %}
{%- endfor %}
Note that since all sets of relation data from all related services and
units are in a single list, if you need to know which service or unit a
set of data came from, you'll need to extend this class to preserve
that information.
"""
if not hookenv.relation_ids(self.name):
return
ns = self.setdefault(self.name, [])
for rid in sorted(hookenv.relation_ids(self.name)):
for unit in sorted(hookenv.related_units(rid)):
reldata = hookenv.relation_get(rid=rid, unit=unit)
if self._is_ready(reldata):
ns.append(reldata)
def provide_data(self):
"""
Return data to be relation_set for this interface.
"""
return {}
class MysqlRelation(RelationContext):
"""
Relation context for the `mysql` interface.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = 'db'
interface = 'mysql'
def __init__(self, *args, **kwargs):
self.required_keys = ['host', 'user', 'password', 'database']
RelationContext.__init__(self, *args, **kwargs)
class HttpRelation(RelationContext):
"""
Relation context for the `http` interface.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = 'website'
interface = 'http'
def __init__(self, *args, **kwargs):
self.required_keys = ['host', 'port']
RelationContext.__init__(self, *args, **kwargs)
def provide_data(self):
return {
'host': hookenv.unit_get('private-address'),
'port': 80,
}
class RequiredConfig(dict):
"""
Data context that loads config options with one or more mandatory options.
Once the required options have been changed from their default values, all
config options will be available, namespaced under `config` to prevent
potential naming conflicts (for example, between a config option and a
relation property).
:param list *args: List of options that must be changed from their default values.
"""
def __init__(self, *args):
self.required_options = args
self['config'] = hookenv.config()
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
self.config = yaml.load(fp).get('options', {})
def __bool__(self):
for option in self.required_options:
if option not in self['config']:
return False
current_value = self['config'][option]
default_value = self.config[option].get('default')
if current_value == default_value:
return False
if current_value in (None, '') and default_value in (None, ''):
return False
return True
def __nonzero__(self):
return self.__bool__()
class StoredContext(dict):
"""
A data context that always returns the data that it was first created with.
This is useful to do a one-time generation of things like passwords, that
will thereafter use the same value that was originally generated, instead
of generating a new value each time it is run.
"""
def __init__(self, file_name, config_data):
"""
If the file exists, populate `self` with the data from the file.
Otherwise, populate with the given data and persist it to the file.
"""
if os.path.exists(file_name):
self.update(self.read_context(file_name))
else:
self.store_context(file_name, config_data)
self.update(config_data)
def store_context(self, file_name, config_data):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'w') as file_stream:
os.fchmod(file_stream.fileno(), 0o600)
yaml.dump(config_data, file_stream)
def read_context(self, file_name):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'r') as file_stream:
data = yaml.load(file_stream)
if not data:
raise OSError("%s is empty" % file_name)
return data
class TemplateCallback(ManagerCallback):
"""
Callback class that will render a Jinja2 template, for use as a ready
action.
:param str source: The template source file, relative to
`$CHARM_DIR/templates`
:param str target: The target to write the rendered template to
:param str owner: The owner of the rendered file
:param str group: The group of the rendered file
:param int perms: The permissions of the rendered file
"""
def __init__(self, source, target,
owner='root', group='root', perms=0o444):
self.source = source
self.target = target
self.owner = owner
self.group = group
self.perms = perms
def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
context = {}
for ctx in service.get('required_data', []):
context.update(ctx)
templating.render(self.source, self.target, context,
self.owner, self.group, self.perms)
# Convenience aliases for templates
render_template = template = TemplateCallback
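# Illustrative sketch of how the callback is typically wired into a services
# framework definition (service name, template and target paths are made up):
#
#     from charmhelpers.core.services.base import ServiceManager
#
#     manager = ServiceManager([{
#         'service': 'my-service',
#         'required_data': [HttpRelation()],
#         'data_ready': [
#             template(source='my-service.conf.j2', target='/etc/my-service.conf'),
#         ],
#     }])
#     manager.manage()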
| mit | -3,186,722,227,022,911,000 | 35.464419 | 97 | 0.631676 | false |
njoyce/pyamf | doc/tutorials/examples/general/helloworld/python/server.py | 8 | 1347 | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Hello world example server.
@see: U{HelloWorld<http://pyamf.org/tutorials/general/helloworld/index.html>} wiki page.
@since: 0.1.0
"""
def echo(data):
"""
Just return data back to the client.
"""
return data
services = {
'echo': echo,
'echo.echo': echo
}
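# One way to exercise the gateway from Python once the server below is running
# (sketch only; host/port match the defaults used in __main__):
#
#     from pyamf.remoting.client import RemotingService
#
#     gateway = RemotingService('http://localhost:8000')
#     service = gateway.getService('echo')
#     print service('Hello world!')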
if __name__ == '__main__':
import os
from pyamf.remoting.gateway.wsgi import WSGIGateway
from wsgiref import simple_server
gw = WSGIGateway(services)
httpd = simple_server.WSGIServer(
('localhost', 8000),
simple_server.WSGIRequestHandler,
)
def app(environ, start_response):
if environ['PATH_INFO'] == '/crossdomain.xml':
fn = os.path.join(os.getcwd(), os.path.dirname(__file__),
'crossdomain.xml')
fp = open(fn, 'rt')
buffer = fp.readlines()
fp.close()
start_response('200 OK', [
('Content-Type', 'application/xml'),
('Content-Length', str(len(''.join(buffer))))
])
return buffer
return gw(environ, start_response)
httpd.set_app(app)
print "Running Hello World AMF gateway on http://localhost:8000"
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
| mit | -7,744,107,146,699,620,000 | 21.081967 | 88 | 0.575353 | false |
codevlabs/ZeroNet | src/main.py | 10 | 11710 | # Included modules
import os
import sys
import time
import logging
# Third party modules
import gevent
from gevent import monkey
update_after_shutdown = False # If set True then update and restart zeronet after main loop ended
# Load config
from Config import config
config.parse(silent=True) # Plugins need to access the configuration
# Create necessary files and dirs
if not os.path.isdir(config.log_dir):
os.mkdir(config.log_dir)
if not os.path.isdir(config.data_dir):
os.mkdir(config.data_dir)
if not os.path.isfile("%s/sites.json" % config.data_dir):
open("%s/sites.json" % config.data_dir, "w").write("{}")
if not os.path.isfile("%s/users.json" % config.data_dir):
open("%s/users.json" % config.data_dir, "w").write("{}")
# Setup logging
if config.action == "main":
if os.path.isfile("%s/debug.log" % config.log_dir): # Simple logrotate
if os.path.isfile("%s/debug-last.log" % config.log_dir):
os.unlink("%s/debug-last.log" % config.log_dir)
os.rename("%s/debug.log" % config.log_dir, "%s/debug-last.log" % config.log_dir)
logging.basicConfig(format='[%(asctime)s] %(levelname)-8s %(name)s %(message)s',
level=logging.DEBUG, filename="%s/debug.log" % config.log_dir)
else:
logging.basicConfig(level=logging.DEBUG, stream=open(os.devnull, "w")) # No file logging if action is not main
# Console logger
console_log = logging.StreamHandler()
if config.action == "main": # Add time if main action
console_log.setFormatter(logging.Formatter('[%(asctime)s] %(name)s %(message)s', "%H:%M:%S"))
else:
console_log.setFormatter(logging.Formatter('%(name)s %(message)s', "%H:%M:%S"))
logging.getLogger('').addHandler(console_log) # Add console logger
logging.getLogger('').name = "-" # Remove root prefix
# Debug dependent configuration
from Debug import DebugHook
if config.debug:
console_log.setLevel(logging.DEBUG) # Display everything to console
else:
console_log.setLevel(logging.INFO) # Display only important info to console
monkey.patch_all(thread=False) # Not thread: pyfilesystem and system tray icon not compatible
# Load plugins
from Plugin import PluginManager
PluginManager.plugin_manager.loadPlugins()
config.loadPlugins()
config.parse() # Parse again to add plugin configuration options
# Log current config
logging.debug("Config: %s" % config)
# Use pure-python implementation of msgpack to save CPU
if config.msgpack_purepython:
os.environ["MSGPACK_PUREPYTHON"] = "True"
# Socks Proxy monkey patch
if config.proxy:
from util import SocksProxy
import urllib2
logging.info("Patching sockets to socks proxy: %s" % config.proxy)
config.fileserver_ip = '127.0.0.1' # Do not accept connections anywhere but localhost
SocksProxy.monkeyPath(*config.proxy.split(":"))
# -- Actions --
@PluginManager.acceptPlugins
class Actions(object):
def call(self, function_name, kwargs):
func = getattr(self, function_name, None)
func(**kwargs)
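    # Rough dispatch sketch (hedged): a command line such as
    # `zeronet.py siteSign <address>` is parsed into config.action == "siteSign"
    # plus keyword arguments, which the launcher hands to
    # Actions().call("siteSign", {"address": "<address>", ...}).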
# Default action: Start serving UiServer and FileServer
def main(self):
logging.info("Version: %s r%s, Python %s, Gevent: %s" % (config.version, config.rev, sys.version, gevent.__version__))
global ui_server, file_server
from File import FileServer
from Ui import UiServer
logging.info("Creating UiServer....")
ui_server = UiServer()
logging.info("Removing old SSL certs...")
from Crypt import CryptConnection
CryptConnection.manager.removeCerts()
logging.info("Creating FileServer....")
file_server = FileServer()
logging.info("Starting servers....")
gevent.joinall([gevent.spawn(ui_server.start), gevent.spawn(file_server.start)])
# Site commands
def siteCreate(self):
logging.info("Generating new privatekey...")
from Crypt import CryptBitcoin
privatekey = CryptBitcoin.newPrivatekey()
logging.info("----------------------------------------------------------------------")
logging.info("Site private key: %s" % privatekey)
logging.info(" !!! ^ Save it now, required to modify the site ^ !!!")
address = CryptBitcoin.privatekeyToAddress(privatekey)
logging.info("Site address: %s" % address)
logging.info("----------------------------------------------------------------------")
        while not config.batch:
if raw_input("? Have you secured your private key? (yes, no) > ").lower() == "yes":
break
else:
                logging.info("Please secure it now, you're going to need it to modify your site!")
logging.info("Creating directory structure...")
from Site import Site
os.mkdir("%s/%s" % (config.data_dir, address))
open("%s/%s/index.html" % (config.data_dir, address), "w").write("Hello %s!" % address)
logging.info("Creating content.json...")
site = Site(address)
site.content_manager.sign(privatekey=privatekey)
site.settings["own"] = True
site.saveSettings()
logging.info("Site created!")
def siteSign(self, address, privatekey=None, inner_path="content.json", publish=False):
from Site import Site
logging.info("Signing site: %s..." % address)
site = Site(address, allow_create=False)
        if not privatekey: # If no privatekey in args then ask for it now
import getpass
privatekey = getpass.getpass("Private key (input hidden):")
succ = site.content_manager.sign(inner_path=inner_path, privatekey=privatekey, update_changed_files=True)
if succ and publish:
self.sitePublish(address, inner_path=inner_path)
def siteVerify(self, address):
import time
from Site import Site
s = time.time()
        logging.info("Verifying site: %s..." % address)
site = Site(address)
bad_files = []
for content_inner_path in site.content_manager.contents:
            logging.info("Verifying %s signature..." % content_inner_path)
file_correct = site.content_manager.verifyFile(
content_inner_path, site.storage.open(content_inner_path, "rb"), ignore_same=False
)
if file_correct is True:
logging.info("[OK] %s signed by address %s!" % (content_inner_path, address))
else:
logging.error("[ERROR] %s: invalid file!" % content_inner_path)
                bad_files.append(content_inner_path)
logging.info("Verifying site files...")
bad_files += site.storage.verifyFiles()
if not bad_files:
logging.info("[OK] All file sha512sum matches! (%.3fs)" % (time.time() - s))
else:
            logging.error("[ERROR] Error while verifying site files!")
def dbRebuild(self, address):
from Site import Site
logging.info("Rebuilding site sql cache: %s..." % address)
site = Site(address)
s = time.time()
site.storage.rebuildDb()
logging.info("Done in %.3fs" % (time.time() - s))
def dbQuery(self, address, query):
from Site import Site
import json
site = Site(address)
result = []
for row in site.storage.query(query):
result.append(dict(row))
print json.dumps(result, indent=4)
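    # Illustrative invocation (hypothetical arguments):
    #   python zeronet.py dbQuery <site-address> "SELECT * FROM json LIMIT 1"
    # The matching rows are printed as a pretty-printed JSON list.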
def siteAnnounce(self, address):
from Site.Site import Site
logging.info("Announcing site %s to tracker..." % address)
site = Site(address)
s = time.time()
site.announce()
print "Response time: %.3fs" % (time.time() - s)
print site.peers
def siteNeedFile(self, address, inner_path):
from Site import Site
site = Site(address)
site.announce()
print site.needFile(inner_path, update=True)
def sitePublish(self, address, peer_ip=None, peer_port=15441, inner_path="content.json"):
global file_server
from Site import Site
from File import FileServer # We need fileserver to handle incoming file requests
logging.info("Creating FileServer....")
file_server = FileServer()
        file_server_thread = gevent.spawn(file_server.start, check_sites=False) # Don't check every site's integrity
        file_server.openport()
        site = file_server.sites[address]
        site.settings["serving"] = True # Serve the site even if it's disabled
        if peer_ip: # Announce to the specified ip
site.addPeer(peer_ip, peer_port)
else: # Just ask the tracker
logging.info("Gathering peers from tracker")
site.announce() # Gather peers
published = site.publish(20, inner_path) # Push to 20 peers
if published > 0:
time.sleep(3)
logging.info("Serving files (max 60s)...")
gevent.joinall([file_server_thread], timeout=60)
logging.info("Done.")
else:
logging.info("No peers found, sitePublish command only works if you already have visitors serving your site")
# Crypto commands
def cryptPrivatekeyToAddress(self, privatekey=None):
from Crypt import CryptBitcoin
        if not privatekey: # If no privatekey in args then ask for it now
import getpass
privatekey = getpass.getpass("Private key (input hidden):")
print CryptBitcoin.privatekeyToAddress(privatekey)
def cryptSign(self, message, privatekey):
from Crypt import CryptBitcoin
print CryptBitcoin.sign(message, privatekey)
# Peer
def peerPing(self, peer_ip, peer_port=None):
if not peer_port:
peer_port = config.fileserver_port
logging.info("Opening a simple connection server")
global file_server
from Connection import ConnectionServer
file_server = ConnectionServer("127.0.0.1", 1234)
from Peer import Peer
        logging.info("Pinging peer 5 times: %s:%s..." % (peer_ip, int(peer_port)))
peer = Peer(peer_ip, peer_port)
for i in range(5):
s = time.time()
print peer.ping(),
print "Response time: %.3fs (crypt: %s)" % (time.time() - s, peer.connection.crypt)
time.sleep(1)
def peerGetFile(self, peer_ip, peer_port, site, filename, benchmark=False):
logging.info("Opening a simple connection server")
global file_server
from Connection import ConnectionServer
file_server = ConnectionServer()
from Peer import Peer
logging.info("Getting %s/%s from peer: %s:%s..." % (site, filename, peer_ip, peer_port))
peer = Peer(peer_ip, peer_port)
s = time.time()
peer.getFile(site, filename)
if benchmark:
for i in range(10):
print peer.getFile(site, filename),
print "Response time: %.3fs" % (time.time() - s)
raw_input("Check memory")
def peerCmd(self, peer_ip, peer_port, cmd, parameters):
logging.info("Opening a simple connection server")
global file_server
from Connection import ConnectionServer
file_server = ConnectionServer()
from Peer import Peer
peer = Peer(peer_ip, peer_port)
import json
if parameters:
parameters = json.loads(parameters.replace("'", '"'))
else:
parameters = {}
logging.info("Response: %s" % peer.request(cmd, parameters))
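    # Illustrative invocation (hypothetical arguments): single quotes in the
    # parameters string are tolerated because it is normalised with
    # .replace("'", '"') before json.loads():
    #   python zeronet.py peerCmd 127.0.0.1 15441 ping "{}"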
actions = Actions()
# Starts here when running zeronet.py
def start():
# Call function
action_kwargs = config.getActionArguments()
actions.call(config.action, action_kwargs)
| gpl-2.0 | 6,025,404,489,523,852,000 | 36.774194 | 126 | 0.623143 | false |
EliteTK/qutebrowser | qutebrowser/misc/ipc.py | 6 | 19531 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for IPC with existing instances."""
import os
import sys
import time
import json
import getpass
import binascii
import hashlib
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, Qt
from PyQt5.QtNetwork import QLocalSocket, QLocalServer, QAbstractSocket
import qutebrowser
from qutebrowser.utils import log, usertypes, error, objreg, standarddir
CONNECT_TIMEOUT = 100 # timeout for connecting/disconnecting
WRITE_TIMEOUT = 1000
READ_TIMEOUT = 5000
ATIME_INTERVAL = 60 * 60 * 6 * 1000 # 6 hours
PROTOCOL_VERSION = 1
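# Wire format sketch (not part of the original module): each request is a
# single newline-terminated JSON object whose keys mirror what
# send_to_running_instance() builds further down; the values here are made up.
#
#   {"args": ["https://example.org"], "target_arg": "tab",
#    "version": "1.0.0", "protocol_version": 1, "cwd": "/home/user"}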
def _get_socketname_legacy(basedir):
"""Legacy implementation of _get_socketname."""
parts = ['qutebrowser', getpass.getuser()]
if basedir is not None:
md5 = hashlib.md5(basedir.encode('utf-8')).hexdigest()
parts.append(md5)
return '-'.join(parts)
def _get_socketname(basedir, legacy=False):
"""Get a socketname to use."""
if legacy or os.name == 'nt':
return _get_socketname_legacy(basedir)
parts_to_hash = [getpass.getuser()]
if basedir is not None:
parts_to_hash.append(basedir)
data_to_hash = '-'.join(parts_to_hash).encode('utf-8')
md5 = hashlib.md5(data_to_hash).hexdigest()
target_dir = standarddir.runtime()
parts = ['ipc']
parts.append(md5)
return os.path.join(target_dir, '-'.join(parts))
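# Illustrative result (made-up values): on a typical Linux setup this returns
# something like
#   /run/user/1000/qutebrowser/ipc-5d41402abc4b2a76b9719d911017c592
# i.e. an "ipc-<md5>" file inside the user's runtime directory.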
class Error(Exception):
"""Base class for IPC exceptions."""
class SocketError(Error):
"""Exception raised when there was an error with a QLocalSocket.
Args:
code: The error code.
message: The error message.
action: The action which was taken when the error happened.
"""
def __init__(self, action, socket):
"""Constructor.
Args:
action: The action which was taken when the error happened.
socket: The QLocalSocket which has the error set.
"""
super().__init__()
self.action = action
self.code = socket.error()
self.message = socket.errorString()
def __str__(self):
return "Error while {}: {} (error {})".format(
self.action, self.message, self.code)
class ListenError(Error):
"""Exception raised when there was a problem with listening to IPC.
Args:
code: The error code.
message: The error message.
"""
def __init__(self, server):
"""Constructor.
Args:
server: The QLocalServer which has the error set.
"""
super().__init__()
self.code = server.serverError()
self.message = server.errorString()
def __str__(self):
return "Error while listening to IPC server: {} (error {})".format(
self.message, self.code)
class AddressInUseError(ListenError):
"""Emitted when the server address is already in use."""
class IPCServer(QObject):
    """IPC server to which clients connect.
Attributes:
ignored: Whether requests are ignored (in exception hook).
_timer: A timer to handle timeouts.
_server: A QLocalServer to accept new connections.
_socket: The QLocalSocket we're currently connected to.
_socketname: The socketname to use.
_socketopts_ok: Set if using setSocketOptions is working with this
OS/Qt version.
_atime_timer: Timer to update the atime of the socket regularly.
Signals:
got_args: Emitted when there was an IPC connection and arguments were
passed.
        got_raw: Emitted with the raw data an IPC connection got.
got_invalid_data: Emitted when there was invalid incoming data.
"""
got_args = pyqtSignal(list, str, str)
got_raw = pyqtSignal(bytes)
got_invalid_data = pyqtSignal()
def __init__(self, socketname, parent=None):
"""Start the IPC server and listen to commands.
Args:
socketname: The socketname to use.
parent: The parent to be used.
"""
super().__init__(parent)
self.ignored = False
self._socketname = socketname
self._timer = usertypes.Timer(self, 'ipc-timeout')
self._timer.setInterval(READ_TIMEOUT)
self._timer.timeout.connect(self.on_timeout)
if os.name == 'nt': # pragma: no cover
self._atime_timer = None
else:
self._atime_timer = usertypes.Timer(self, 'ipc-atime')
self._atime_timer.setInterval(ATIME_INTERVAL)
self._atime_timer.timeout.connect(self.update_atime)
self._atime_timer.setTimerType(Qt.VeryCoarseTimer)
self._server = QLocalServer(self)
self._server.newConnection.connect(self.handle_connection)
self._socket = None
self._socketopts_ok = os.name == 'nt'
if self._socketopts_ok: # pragma: no cover
# If we use setSocketOptions on Unix with Qt < 5.4, we get a
# NameError while listening...
log.ipc.debug("Calling setSocketOptions")
self._server.setSocketOptions(QLocalServer.UserAccessOption)
else: # pragma: no cover
log.ipc.debug("Not calling setSocketOptions")
def _remove_server(self):
"""Remove an existing server."""
ok = QLocalServer.removeServer(self._socketname)
if not ok:
raise Error("Error while removing server {}!".format(
self._socketname))
def listen(self):
"""Start listening on self._socketname."""
log.ipc.debug("Listening as {}".format(self._socketname))
if self._atime_timer is not None: # pragma: no branch
self._atime_timer.start()
self._remove_server()
ok = self._server.listen(self._socketname)
if not ok:
if self._server.serverError() == QAbstractSocket.AddressInUseError:
raise AddressInUseError(self._server)
else:
raise ListenError(self._server)
if not self._socketopts_ok: # pragma: no cover
# If we use setSocketOptions on Unix with Qt < 5.4, we get a
# NameError while listening.
# (see b135569d5c6e68c735ea83f42e4baf51f7972281)
#
# Also, we don't get an AddressInUseError with Qt 5.5:
# https://bugreports.qt.io/browse/QTBUG-48635
#
# This means we only use setSocketOption on Windows...
try:
os.chmod(self._server.fullServerName(), 0o700)
except FileNotFoundError:
# https://github.com/The-Compiler/qutebrowser/issues/1530
# The server doesn't actually exist even if ok was reported as
# True, so report this as an error.
raise ListenError(self._server)
@pyqtSlot('QLocalSocket::LocalSocketError')
def on_error(self, err):
"""Raise SocketError on fatal errors."""
if self._socket is None:
# Sometimes this gets called from stale sockets.
log.ipc.debug("In on_error with None socket!")
return
self._timer.stop()
log.ipc.debug("Socket 0x{:x}: error {}: {}".format(
id(self._socket), self._socket.error(),
self._socket.errorString()))
if err != QLocalSocket.PeerClosedError:
raise SocketError("handling IPC connection", self._socket)
@pyqtSlot()
def handle_connection(self):
"""Handle a new connection to the server."""
if self.ignored:
return
if self._socket is not None:
log.ipc.debug("Got new connection but ignoring it because we're "
"still handling another one (0x{:x}).".format(
id(self._socket)))
return
socket = self._server.nextPendingConnection()
if socket is None:
log.ipc.debug("No new connection to handle.")
return
log.ipc.debug("Client connected (socket 0x{:x}).".format(id(socket)))
self._timer.start()
self._socket = socket
socket.readyRead.connect(self.on_ready_read)
if socket.canReadLine():
log.ipc.debug("We can read a line immediately.")
self.on_ready_read()
socket.error.connect(self.on_error)
if socket.error() not in [QLocalSocket.UnknownSocketError,
QLocalSocket.PeerClosedError]:
log.ipc.debug("We got an error immediately.")
self.on_error(socket.error())
socket.disconnected.connect(self.on_disconnected)
if socket.state() == QLocalSocket.UnconnectedState:
log.ipc.debug("Socket was disconnected immediately.")
self.on_disconnected()
@pyqtSlot()
def on_disconnected(self):
"""Clean up socket when the client disconnected."""
log.ipc.debug("Client disconnected from socket 0x{:x}.".format(
id(self._socket)))
self._timer.stop()
if self._socket is None:
log.ipc.debug("In on_disconnected with None socket!")
else:
self._socket.deleteLater()
self._socket = None
# Maybe another connection is waiting.
self.handle_connection()
def _handle_invalid_data(self):
"""Handle invalid data we got from a QLocalSocket."""
log.ipc.error("Ignoring invalid IPC data from socket 0x{:x}.".format(
id(self._socket)))
self.got_invalid_data.emit()
self._socket.error.connect(self.on_error)
self._socket.disconnectFromServer()
    def _handle_data(self, data):
        """Handle data (as bytes) we got from on_ready_read."""
try:
decoded = data.decode('utf-8')
except UnicodeDecodeError:
log.ipc.error("invalid utf-8: {!r}".format(binascii.hexlify(data)))
self._handle_invalid_data()
return
log.ipc.debug("Processing: {}".format(decoded))
try:
json_data = json.loads(decoded)
except ValueError:
log.ipc.error("invalid json: {}".format(decoded.strip()))
self._handle_invalid_data()
return
for name in ['args', 'target_arg']:
if name not in json_data:
log.ipc.error("Missing {}: {}".format(name, decoded.strip()))
self._handle_invalid_data()
return
try:
protocol_version = int(json_data['protocol_version'])
except (KeyError, ValueError):
log.ipc.error("invalid version: {}".format(decoded.strip()))
self._handle_invalid_data()
return
if protocol_version != PROTOCOL_VERSION:
log.ipc.error("incompatible version: expected {}, got {}".format(
PROTOCOL_VERSION, protocol_version))
self._handle_invalid_data()
return
args = json_data['args']
target_arg = json_data['target_arg']
if target_arg is None:
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-April/037375.html
target_arg = ''
cwd = json_data.get('cwd', '')
assert cwd is not None
self.got_args.emit(args, target_arg, cwd)
@pyqtSlot()
def on_ready_read(self):
"""Read json data from the client."""
if self._socket is None:
# This happens when doing a connection while another one is already
# active for some reason.
log.ipc.warning("In on_ready_read with None socket!")
return
self._timer.stop()
while self._socket is not None and self._socket.canReadLine():
data = bytes(self._socket.readLine())
self.got_raw.emit(data)
log.ipc.debug("Read from socket 0x{:x}: {!r}".format(
id(self._socket), data))
self._handle_data(data)
self._timer.start()
@pyqtSlot()
def on_timeout(self):
"""Cancel the current connection if it was idle for too long."""
log.ipc.error("IPC connection timed out "
"(socket 0x{:x}).".format(id(self._socket)))
self._socket.disconnectFromServer()
if self._socket is not None: # pragma: no cover
# on_socket_disconnected sets it to None
self._socket.waitForDisconnected(CONNECT_TIMEOUT)
if self._socket is not None: # pragma: no cover
# on_socket_disconnected sets it to None
self._socket.abort()
@pyqtSlot()
    def update_atime(self):
        """Update the atime of the socket file every few hours.
From the XDG basedir spec:
To ensure that your files are not removed, they should have their
access time timestamp modified at least once every 6 hours of monotonic
time or the 'sticky' bit should be set on the file.
"""
path = self._server.fullServerName()
if not path:
log.ipc.error("In update_atime with no server path!")
return
log.ipc.debug("Touching {}".format(path))
os.utime(path)
def shutdown(self):
"""Shut down the IPC server cleanly."""
log.ipc.debug("Shutting down IPC (socket 0x{:x})".format(
id(self._socket)))
if self._socket is not None:
self._socket.deleteLater()
self._socket = None
self._timer.stop()
if self._atime_timer is not None: # pragma: no branch
self._atime_timer.stop()
try:
self._atime_timer.timeout.disconnect(self.update_atime)
except TypeError:
pass
self._server.close()
self._server.deleteLater()
self._remove_server()
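# Lifecycle sketch (illustrative, not part of the original module; assumes a
# running Qt event loop):
#   server = IPCServer(_get_socketname(basedir=None))
#   server.listen()
#   server.got_args.connect(lambda args, target, cwd: print(args))
#   ...
#   server.shutdown()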
def _has_legacy_server(name):
"""Check if there is a legacy server.
Args:
name: The name to try to connect to.
Return:
True if there is a server with the given name, False otherwise.
"""
socket = QLocalSocket()
log.ipc.debug("Trying to connect to {}".format(name))
socket.connectToServer(name)
err = socket.error()
if err != QLocalSocket.UnknownSocketError:
log.ipc.debug("Socket error: {} ({})".format(
socket.errorString(), err))
os_x_fail = (sys.platform == 'darwin' and
socket.errorString() == 'QLocalSocket::connectToServer: '
'Unknown error 38')
if err not in [QLocalSocket.ServerNotFoundError,
QLocalSocket.ConnectionRefusedError] and not os_x_fail:
return True
socket.disconnectFromServer()
if socket.state() != QLocalSocket.UnconnectedState:
socket.waitForDisconnected(CONNECT_TIMEOUT)
return False
def send_to_running_instance(socketname, command, target_arg, *,
legacy_name=None, socket=None):
"""Try to send a commandline to a running instance.
Blocks for CONNECT_TIMEOUT ms.
Args:
socketname: The name which should be used for the socket.
command: The command to send to the running instance.
target_arg: --target command line argument
socket: The socket to read data from, or None.
legacy_name: The legacy name to first try to connect to.
Return:
True if connecting was successful, False if no connection was made.
"""
if socket is None:
socket = QLocalSocket()
if legacy_name is not None and _has_legacy_server(legacy_name):
name_to_use = legacy_name
else:
name_to_use = socketname
log.ipc.debug("Connecting to {}".format(name_to_use))
socket.connectToServer(name_to_use)
connected = socket.waitForConnected(CONNECT_TIMEOUT)
if connected:
log.ipc.info("Opening in existing instance")
json_data = {'args': command, 'target_arg': target_arg,
'version': qutebrowser.__version__,
'protocol_version': PROTOCOL_VERSION}
try:
cwd = os.getcwd()
except OSError:
pass
else:
json_data['cwd'] = cwd
line = json.dumps(json_data) + '\n'
data = line.encode('utf-8')
log.ipc.debug("Writing: {!r}".format(data))
socket.writeData(data)
socket.waitForBytesWritten(WRITE_TIMEOUT)
if socket.error() != QLocalSocket.UnknownSocketError:
raise SocketError("writing to running instance", socket)
else:
socket.disconnectFromServer()
if socket.state() != QLocalSocket.UnconnectedState:
socket.waitForDisconnected(CONNECT_TIMEOUT)
return True
else:
if socket.error() not in [QLocalSocket.ConnectionRefusedError,
QLocalSocket.ServerNotFoundError]:
raise SocketError("connecting to running instance", socket)
else:
log.ipc.debug("No existing instance present (error {})".format(
socket.error()))
return False
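# Client-side sketch (illustrative, mirrors what send_or_listen() does below):
#   name = _get_socketname(basedir=None)
#   if not send_to_running_instance(name, ['https://example.org'], ''):
#       ...no running instance; start an IPCServer instead...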
def display_error(exc, args):
"""Display a message box with an IPC error."""
error.handle_fatal_exc(
exc, args, "Error while connecting to running instance!",
post_text="Maybe another instance is running but frozen?")
def send_or_listen(args):
"""Send the args to a running instance or start a new IPCServer.
Args:
args: The argparse namespace.
Return:
The IPCServer instance if no running instance was detected.
None if an instance was running and received our request.
"""
socketname = _get_socketname(args.basedir)
legacy_socketname = _get_socketname(args.basedir, legacy=True)
try:
try:
sent = send_to_running_instance(socketname, args.command,
args.target,
legacy_name=legacy_socketname)
if sent:
return None
log.init.debug("Starting IPC server...")
server = IPCServer(socketname)
server.listen()
objreg.register('ipc-server', server)
return server
except AddressInUseError as e:
# This could be a race condition...
log.init.debug("Got AddressInUseError, trying again.")
time.sleep(0.5)
sent = send_to_running_instance(socketname, args.command,
args.target,
legacy_name=legacy_socketname)
if sent:
return None
else:
raise
except Error as e:
display_error(e, args)
raise
| gpl-3.0 | -5,241,322,583,141,148,000 | 34.575592 | 86 | 0.596129 | false |
dilawar/moose-full | moose-core/python/moose/neuroml2/generated_neuroml.py | 2 | 717194 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Sun Jul 28 10:18:38 2013 by generateDS.py version 2.10a.
#
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
etree_ = None
Verbose_import_ = False
(
XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
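# Illustrative use (hypothetical file name):
#   doc = parsexml_('cell.nml')
#   root = doc.getroot()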
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
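# Illustrative behaviour of quote_attrib (not part of the generated code):
#   quote_attrib('a & b') -> '"a &amp; b"'
#   quote_attrib('say "hi"') -> '\'say "hi"\'' (single-quoted, nothing escaped)
#   quote_attrib('it\'s "x"') -> '"it\'s &quot;x&quot;"'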
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class Annotation(GeneratedsSuper):
"""Placeholder for MIRIAM related metadata, among others."""
subclass = None
superclass = None
def __init__(self, anytypeobjs_=None):
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if Annotation.subclass:
return Annotation.subclass(*args_, **kwargs_)
else:
return Annotation(*args_, **kwargs_)
factory = staticmethod(factory)
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
def insert_anytypeobjs_(self, index, value): self._anytypeobjs_[index] = value
def hasContent_(self):
if (
self.anytypeobjs_
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Annotation', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Annotation')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Annotation'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='Annotation', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for obj_ in self.anytypeobjs_:
obj_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Annotation'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('anytypeobjs_=[\n')
level += 1
for anytypeobjs_ in self.anytypeobjs_:
anytypeobjs_.exportLiteral(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
obj_ = self.gds_build_any(child_, 'Annotation')
if obj_ is not None:
self.add_anytypeobjs_(obj_)
# end class Annotation
class ComponentType(GeneratedsSuper):
"""Contains an extension to NeuroML by creating custom LEMS
ComponentType."""
subclass = None
superclass = None
def __init__(self, extends=None, name=None, description=None, anytypeobjs_=None):
self.extends = _cast(None, extends)
self.name = _cast(None, name)
self.description = _cast(None, description)
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if ComponentType.subclass:
return ComponentType.subclass(*args_, **kwargs_)
else:
return ComponentType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
def insert_anytypeobjs_(self, index, value): self._anytypeobjs_[index] = value
def get_extends(self): return self.extends
def set_extends(self, extends): self.extends = extends
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_description(self): return self.description
def set_description(self, description): self.description = description
def hasContent_(self):
if (
self.anytypeobjs_
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ComponentType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ComponentType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ComponentType'):
if self.extends is not None and 'extends' not in already_processed:
already_processed.add('extends')
outfile.write(' extends=%s' % (self.gds_format_string(quote_attrib(self.extends).encode(ExternalEncoding), input_name='extends'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.description is not None and 'description' not in already_processed:
already_processed.add('description')
outfile.write(' description=%s' % (self.gds_format_string(quote_attrib(self.description).encode(ExternalEncoding), input_name='description'), ))
def exportChildren(self, outfile, level, namespace_='', name_='ComponentType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for obj_ in self.anytypeobjs_:
obj_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ComponentType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.extends is not None and 'extends' not in already_processed:
already_processed.add('extends')
showIndent(outfile, level)
outfile.write('extends="%s",\n' % (self.extends,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
if self.description is not None and 'description' not in already_processed:
already_processed.add('description')
showIndent(outfile, level)
outfile.write('description="%s",\n' % (self.description,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('anytypeobjs_=[\n')
level += 1
for anytypeobjs_ in self.anytypeobjs_:
anytypeobjs_.exportLiteral(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('extends', node)
if value is not None and 'extends' not in already_processed:
already_processed.add('extends')
self.extends = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('description', node)
if value is not None and 'description' not in already_processed:
already_processed.add('description')
self.description = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
obj_ = self.gds_build_any(child_, 'ComponentType')
if obj_ is not None:
self.add_anytypeobjs_(obj_)
# end class ComponentType
class IncludeType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, href=None, valueOf_=None, mixedclass_=None, content_=None):
self.href = _cast(None, href)
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if IncludeType.subclass:
return IncludeType.subclass(*args_, **kwargs_)
else:
return IncludeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_href(self): return self.href
def set_href(self, href): self.href = href
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IncludeType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IncludeType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IncludeType'):
if self.href is not None and 'href' not in already_processed:
already_processed.add('href')
outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
def exportChildren(self, outfile, level, namespace_='', name_='IncludeType', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='IncludeType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.href is not None and 'href' not in already_processed:
already_processed.add('href')
showIndent(outfile, level)
outfile.write('href="%s",\n' % (self.href,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
if node.text is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', node.text)
self.content_.append(obj_)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('href', node)
if value is not None and 'href' not in already_processed:
already_processed.add('href')
self.href = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if not fromsubclass_ and child_.tail is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.tail)
self.content_.append(obj_)
pass
# end class IncludeType
class Q10Settings(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, fixedQ10=None, experimentalTemp=None, type_=None, q10Factor=None):
self.fixedQ10 = _cast(None, fixedQ10)
self.experimentalTemp = _cast(None, experimentalTemp)
self.type_ = _cast(None, type_)
self.q10Factor = _cast(None, q10Factor)
pass
def factory(*args_, **kwargs_):
if Q10Settings.subclass:
return Q10Settings.subclass(*args_, **kwargs_)
else:
return Q10Settings(*args_, **kwargs_)
factory = staticmethod(factory)
def get_fixedQ10(self): return self.fixedQ10
def set_fixedQ10(self, fixedQ10): self.fixedQ10 = fixedQ10
def validate_Nml2Quantity_none(self, value):
# Validate type Nml2Quantity_none, a restriction on xs:string.
pass
def get_experimentalTemp(self): return self.experimentalTemp
def set_experimentalTemp(self, experimentalTemp): self.experimentalTemp = experimentalTemp
def validate_Nml2Quantity_temperature(self, value):
# Validate type Nml2Quantity_temperature, a restriction on xs:string.
pass
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_q10Factor(self): return self.q10Factor
def set_q10Factor(self, q10Factor): self.q10Factor = q10Factor
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Q10Settings', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Q10Settings')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Q10Settings'):
if self.fixedQ10 is not None and 'fixedQ10' not in already_processed:
already_processed.add('fixedQ10')
outfile.write(' fixedQ10=%s' % (quote_attrib(self.fixedQ10), ))
if self.experimentalTemp is not None and 'experimentalTemp' not in already_processed:
already_processed.add('experimentalTemp')
outfile.write(' experimentalTemp=%s' % (quote_attrib(self.experimentalTemp), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
if self.q10Factor is not None and 'q10Factor' not in already_processed:
already_processed.add('q10Factor')
outfile.write(' q10Factor=%s' % (quote_attrib(self.q10Factor), ))
def exportChildren(self, outfile, level, namespace_='', name_='Q10Settings', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='Q10Settings'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.fixedQ10 is not None and 'fixedQ10' not in already_processed:
already_processed.add('fixedQ10')
showIndent(outfile, level)
outfile.write('fixedQ10="%s",\n' % (self.fixedQ10,))
if self.experimentalTemp is not None and 'experimentalTemp' not in already_processed:
already_processed.add('experimentalTemp')
showIndent(outfile, level)
outfile.write('experimentalTemp="%s",\n' % (self.experimentalTemp,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.q10Factor is not None and 'q10Factor' not in already_processed:
already_processed.add('q10Factor')
showIndent(outfile, level)
outfile.write('q10Factor="%s",\n' % (self.q10Factor,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('fixedQ10', node)
if value is not None and 'fixedQ10' not in already_processed:
already_processed.add('fixedQ10')
self.fixedQ10 = value
self.validate_Nml2Quantity_none(self.fixedQ10) # validate type Nml2Quantity_none
value = find_attr_value_('experimentalTemp', node)
if value is not None and 'experimentalTemp' not in already_processed:
already_processed.add('experimentalTemp')
self.experimentalTemp = value
self.validate_Nml2Quantity_temperature(self.experimentalTemp) # validate type Nml2Quantity_temperature
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_NmlId(self.type_) # validate type NmlId
value = find_attr_value_('q10Factor', node)
if value is not None and 'q10Factor' not in already_processed:
already_processed.add('q10Factor')
self.q10Factor = value
self.validate_Nml2Quantity_none(self.q10Factor) # validate type Nml2Quantity_none
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Q10Settings
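# Illustrative NeuroML fragment that maps onto Q10Settings (attribute values
# are examples only):
#   <q10Settings type="q10ExpTemp" q10Factor="3" experimentalTemp="25 degC"/>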
class HHRate(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, midpoint=None, rate=None, scale=None, type_=None):
self.midpoint = _cast(None, midpoint)
self.rate = _cast(None, rate)
self.scale = _cast(None, scale)
self.type_ = _cast(None, type_)
pass
def factory(*args_, **kwargs_):
if HHRate.subclass:
return HHRate.subclass(*args_, **kwargs_)
else:
return HHRate(*args_, **kwargs_)
factory = staticmethod(factory)
def get_midpoint(self): return self.midpoint
def set_midpoint(self, midpoint): self.midpoint = midpoint
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def get_rate(self): return self.rate
def set_rate(self, rate): self.rate = rate
def validate_Nml2Quantity_pertime(self, value):
# Validate type Nml2Quantity_pertime, a restriction on xs:string.
pass
def get_scale(self): return self.scale
def set_scale(self, scale): self.scale = scale
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='HHRate', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='HHRate')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='HHRate'):
if self.midpoint is not None and 'midpoint' not in already_processed:
already_processed.add('midpoint')
outfile.write(' midpoint=%s' % (quote_attrib(self.midpoint), ))
if self.rate is not None and 'rate' not in already_processed:
already_processed.add('rate')
outfile.write(' rate=%s' % (quote_attrib(self.rate), ))
if self.scale is not None and 'scale' not in already_processed:
already_processed.add('scale')
outfile.write(' scale=%s' % (quote_attrib(self.scale), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, outfile, level, namespace_='', name_='HHRate', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='HHRate'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.midpoint is not None and 'midpoint' not in already_processed:
already_processed.add('midpoint')
showIndent(outfile, level)
outfile.write('midpoint="%s",\n' % (self.midpoint,))
if self.rate is not None and 'rate' not in already_processed:
already_processed.add('rate')
showIndent(outfile, level)
outfile.write('rate="%s",\n' % (self.rate,))
if self.scale is not None and 'scale' not in already_processed:
already_processed.add('scale')
showIndent(outfile, level)
outfile.write('scale="%s",\n' % (self.scale,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('midpoint', node)
if value is not None and 'midpoint' not in already_processed:
already_processed.add('midpoint')
self.midpoint = value
self.validate_Nml2Quantity_voltage(self.midpoint) # validate type Nml2Quantity_voltage
value = find_attr_value_('rate', node)
if value is not None and 'rate' not in already_processed:
already_processed.add('rate')
self.rate = value
self.validate_Nml2Quantity_pertime(self.rate) # validate type Nml2Quantity_pertime
value = find_attr_value_('scale', node)
if value is not None and 'scale' not in already_processed:
already_processed.add('scale')
self.scale = value
self.validate_Nml2Quantity_voltage(self.scale) # validate type Nml2Quantity_voltage
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_NmlId(self.type_) # validate type NmlId
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class HHRate
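# Illustrative sketch, not part of the generated bindings: constructing an HHRate
# directly and serialising it with export(). HHRate keeps its attributes as
# NeuroML-style quantity strings (voltage, per-time, voltage) plus an NmlId type name;
# the concrete values below are hypothetical. The function is only defined, never called.
def _example_export_hhrate():  # documentation sketch only
    import sys
    forward = HHRate(midpoint='-40mV', rate='1per_ms', scale='10mV',
                     type_='HHExpLinearRate')
    forward.export(sys.stdout, 0, name_='forwardRate')  # emits a single empty element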
class HHVariable(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, midpoint=None, rate=None, scale=None, type_=None):
self.midpoint = _cast(None, midpoint)
self.rate = _cast(float, rate)
self.scale = _cast(None, scale)
self.type_ = _cast(None, type_)
pass
def factory(*args_, **kwargs_):
if HHVariable.subclass:
return HHVariable.subclass(*args_, **kwargs_)
else:
return HHVariable(*args_, **kwargs_)
factory = staticmethod(factory)
def get_midpoint(self): return self.midpoint
def set_midpoint(self, midpoint): self.midpoint = midpoint
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def get_rate(self): return self.rate
def set_rate(self, rate): self.rate = rate
def get_scale(self): return self.scale
def set_scale(self, scale): self.scale = scale
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='HHVariable', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='HHVariable')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='HHVariable'):
if self.midpoint is not None and 'midpoint' not in already_processed:
already_processed.add('midpoint')
outfile.write(' midpoint=%s' % (quote_attrib(self.midpoint), ))
if self.rate is not None and 'rate' not in already_processed:
already_processed.add('rate')
outfile.write(' rate="%s"' % self.gds_format_float(self.rate, input_name='rate'))
if self.scale is not None and 'scale' not in already_processed:
already_processed.add('scale')
outfile.write(' scale=%s' % (quote_attrib(self.scale), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, outfile, level, namespace_='', name_='HHVariable', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='HHVariable'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.midpoint is not None and 'midpoint' not in already_processed:
already_processed.add('midpoint')
showIndent(outfile, level)
outfile.write('midpoint="%s",\n' % (self.midpoint,))
if self.rate is not None and 'rate' not in already_processed:
already_processed.add('rate')
showIndent(outfile, level)
outfile.write('rate=%f,\n' % (self.rate,))
if self.scale is not None and 'scale' not in already_processed:
already_processed.add('scale')
showIndent(outfile, level)
outfile.write('scale="%s",\n' % (self.scale,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('midpoint', node)
if value is not None and 'midpoint' not in already_processed:
already_processed.add('midpoint')
self.midpoint = value
self.validate_Nml2Quantity_voltage(self.midpoint) # validate type Nml2Quantity_voltage
value = find_attr_value_('rate', node)
if value is not None and 'rate' not in already_processed:
already_processed.add('rate')
try:
self.rate = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (rate): %s' % exp)
value = find_attr_value_('scale', node)
if value is not None and 'scale' not in already_processed:
already_processed.add('scale')
self.scale = value
self.validate_Nml2Quantity_voltage(self.scale) # validate type Nml2Quantity_voltage
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_NmlId(self.type_) # validate type NmlId
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class HHVariable
class HHTime(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, midpoint=None, rate=None, scale=None, type_=None, tau=None):
self.midpoint = _cast(None, midpoint)
self.rate = _cast(None, rate)
self.scale = _cast(None, scale)
self.type_ = _cast(None, type_)
self.tau = _cast(None, tau)
pass
def factory(*args_, **kwargs_):
if HHTime.subclass:
return HHTime.subclass(*args_, **kwargs_)
else:
return HHTime(*args_, **kwargs_)
factory = staticmethod(factory)
def get_midpoint(self): return self.midpoint
def set_midpoint(self, midpoint): self.midpoint = midpoint
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def get_rate(self): return self.rate
def set_rate(self, rate): self.rate = rate
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_scale(self): return self.scale
def set_scale(self, scale): self.scale = scale
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_tau(self): return self.tau
def set_tau(self, tau): self.tau = tau
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='HHTime', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='HHTime')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='HHTime'):
if self.midpoint is not None and 'midpoint' not in already_processed:
already_processed.add('midpoint')
outfile.write(' midpoint=%s' % (quote_attrib(self.midpoint), ))
if self.rate is not None and 'rate' not in already_processed:
already_processed.add('rate')
outfile.write(' rate=%s' % (quote_attrib(self.rate), ))
if self.scale is not None and 'scale' not in already_processed:
already_processed.add('scale')
outfile.write(' scale=%s' % (quote_attrib(self.scale), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
if self.tau is not None and 'tau' not in already_processed:
already_processed.add('tau')
outfile.write(' tau=%s' % (quote_attrib(self.tau), ))
def exportChildren(self, outfile, level, namespace_='', name_='HHTime', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='HHTime'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.midpoint is not None and 'midpoint' not in already_processed:
already_processed.add('midpoint')
showIndent(outfile, level)
outfile.write('midpoint="%s",\n' % (self.midpoint,))
if self.rate is not None and 'rate' not in already_processed:
already_processed.add('rate')
showIndent(outfile, level)
outfile.write('rate="%s",\n' % (self.rate,))
if self.scale is not None and 'scale' not in already_processed:
already_processed.add('scale')
showIndent(outfile, level)
outfile.write('scale="%s",\n' % (self.scale,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.tau is not None and 'tau' not in already_processed:
already_processed.add('tau')
showIndent(outfile, level)
outfile.write('tau="%s",\n' % (self.tau,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('midpoint', node)
if value is not None and 'midpoint' not in already_processed:
already_processed.add('midpoint')
self.midpoint = value
self.validate_Nml2Quantity_voltage(self.midpoint) # validate type Nml2Quantity_voltage
value = find_attr_value_('rate', node)
if value is not None and 'rate' not in already_processed:
already_processed.add('rate')
self.rate = value
self.validate_Nml2Quantity_time(self.rate) # validate type Nml2Quantity_time
value = find_attr_value_('scale', node)
if value is not None and 'scale' not in already_processed:
already_processed.add('scale')
self.scale = value
self.validate_Nml2Quantity_voltage(self.scale) # validate type Nml2Quantity_voltage
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_NmlId(self.type_) # validate type NmlId
value = find_attr_value_('tau', node)
if value is not None and 'tau' not in already_processed:
already_processed.add('tau')
self.tau = value
self.validate_Nml2Quantity_time(self.tau) # validate type Nml2Quantity_time
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class HHTime
class BlockMechanism(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, blockConcentration=None, scalingConc=None, type_=None, species=None, scalingVolt=None):
self.blockConcentration = _cast(None, blockConcentration)
self.scalingConc = _cast(None, scalingConc)
self.type_ = _cast(None, type_)
self.species = _cast(None, species)
self.scalingVolt = _cast(None, scalingVolt)
pass
def factory(*args_, **kwargs_):
if BlockMechanism.subclass:
return BlockMechanism.subclass(*args_, **kwargs_)
else:
return BlockMechanism(*args_, **kwargs_)
factory = staticmethod(factory)
def get_blockConcentration(self): return self.blockConcentration
def set_blockConcentration(self, blockConcentration): self.blockConcentration = blockConcentration
def validate_Nml2Quantity_concentration(self, value):
# Validate type Nml2Quantity_concentration, a restriction on xs:string.
pass
def get_scalingConc(self): return self.scalingConc
def set_scalingConc(self, scalingConc): self.scalingConc = scalingConc
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_BlockTypes(self, value):
# Validate type BlockTypes, a restriction on xs:string.
pass
def get_species(self): return self.species
def set_species(self, species): self.species = species
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_scalingVolt(self): return self.scalingVolt
def set_scalingVolt(self, scalingVolt): self.scalingVolt = scalingVolt
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='BlockMechanism', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='BlockMechanism')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BlockMechanism'):
if self.blockConcentration is not None and 'blockConcentration' not in already_processed:
already_processed.add('blockConcentration')
outfile.write(' blockConcentration=%s' % (quote_attrib(self.blockConcentration), ))
if self.scalingConc is not None and 'scalingConc' not in already_processed:
already_processed.add('scalingConc')
outfile.write(' scalingConc=%s' % (quote_attrib(self.scalingConc), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
if self.species is not None and 'species' not in already_processed:
already_processed.add('species')
outfile.write(' species=%s' % (quote_attrib(self.species), ))
if self.scalingVolt is not None and 'scalingVolt' not in already_processed:
already_processed.add('scalingVolt')
outfile.write(' scalingVolt=%s' % (quote_attrib(self.scalingVolt), ))
def exportChildren(self, outfile, level, namespace_='', name_='BlockMechanism', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='BlockMechanism'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.blockConcentration is not None and 'blockConcentration' not in already_processed:
already_processed.add('blockConcentration')
showIndent(outfile, level)
outfile.write('blockConcentration="%s",\n' % (self.blockConcentration,))
if self.scalingConc is not None and 'scalingConc' not in already_processed:
already_processed.add('scalingConc')
showIndent(outfile, level)
outfile.write('scalingConc="%s",\n' % (self.scalingConc,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.species is not None and 'species' not in already_processed:
already_processed.add('species')
showIndent(outfile, level)
outfile.write('species="%s",\n' % (self.species,))
if self.scalingVolt is not None and 'scalingVolt' not in already_processed:
already_processed.add('scalingVolt')
showIndent(outfile, level)
outfile.write('scalingVolt="%s",\n' % (self.scalingVolt,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('blockConcentration', node)
if value is not None and 'blockConcentration' not in already_processed:
already_processed.add('blockConcentration')
self.blockConcentration = value
self.validate_Nml2Quantity_concentration(self.blockConcentration) # validate type Nml2Quantity_concentration
value = find_attr_value_('scalingConc', node)
if value is not None and 'scalingConc' not in already_processed:
already_processed.add('scalingConc')
self.scalingConc = value
self.validate_Nml2Quantity_concentration(self.scalingConc) # validate type Nml2Quantity_concentration
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_BlockTypes(self.type_) # validate type BlockTypes
value = find_attr_value_('species', node)
if value is not None and 'species' not in already_processed:
already_processed.add('species')
self.species = value
self.validate_NmlId(self.species) # validate type NmlId
value = find_attr_value_('scalingVolt', node)
if value is not None and 'scalingVolt' not in already_processed:
already_processed.add('scalingVolt')
self.scalingVolt = value
self.validate_Nml2Quantity_voltage(self.scalingVolt) # validate type Nml2Quantity_voltage
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class BlockMechanism
class PlasticityMechanism(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, type_=None, tauFac=None, tauRec=None, initReleaseProb=None):
self.type_ = _cast(None, type_)
self.tauFac = _cast(None, tauFac)
self.tauRec = _cast(None, tauRec)
self.initReleaseProb = _cast(None, initReleaseProb)
pass
def factory(*args_, **kwargs_):
if PlasticityMechanism.subclass:
return PlasticityMechanism.subclass(*args_, **kwargs_)
else:
return PlasticityMechanism(*args_, **kwargs_)
factory = staticmethod(factory)
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_PlasticityTypes(self, value):
# Validate type PlasticityTypes, a restriction on xs:string.
pass
def get_tauFac(self): return self.tauFac
def set_tauFac(self, tauFac): self.tauFac = tauFac
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_tauRec(self): return self.tauRec
def set_tauRec(self, tauRec): self.tauRec = tauRec
def get_initReleaseProb(self): return self.initReleaseProb
def set_initReleaseProb(self, initReleaseProb): self.initReleaseProb = initReleaseProb
def validate_ZeroToOne(self, value):
# Validate type ZeroToOne, a restriction on xs:double.
pass
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='PlasticityMechanism', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='PlasticityMechanism')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PlasticityMechanism'):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
if self.tauFac is not None and 'tauFac' not in already_processed:
already_processed.add('tauFac')
outfile.write(' tauFac=%s' % (quote_attrib(self.tauFac), ))
if self.tauRec is not None and 'tauRec' not in already_processed:
already_processed.add('tauRec')
outfile.write(' tauRec=%s' % (quote_attrib(self.tauRec), ))
if self.initReleaseProb is not None and 'initReleaseProb' not in already_processed:
already_processed.add('initReleaseProb')
outfile.write(' initReleaseProb=%s' % (quote_attrib(self.initReleaseProb), ))
def exportChildren(self, outfile, level, namespace_='', name_='PlasticityMechanism', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='PlasticityMechanism'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.tauFac is not None and 'tauFac' not in already_processed:
already_processed.add('tauFac')
showIndent(outfile, level)
outfile.write('tauFac="%s",\n' % (self.tauFac,))
if self.tauRec is not None and 'tauRec' not in already_processed:
already_processed.add('tauRec')
showIndent(outfile, level)
outfile.write('tauRec="%s",\n' % (self.tauRec,))
if self.initReleaseProb is not None and 'initReleaseProb' not in already_processed:
already_processed.add('initReleaseProb')
showIndent(outfile, level)
outfile.write('initReleaseProb=%e,\n' % (self.initReleaseProb,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_PlasticityTypes(self.type_) # validate type PlasticityTypes
value = find_attr_value_('tauFac', node)
if value is not None and 'tauFac' not in already_processed:
already_processed.add('tauFac')
self.tauFac = value
self.validate_Nml2Quantity_time(self.tauFac) # validate type Nml2Quantity_time
value = find_attr_value_('tauRec', node)
if value is not None and 'tauRec' not in already_processed:
already_processed.add('tauRec')
self.tauRec = value
self.validate_Nml2Quantity_time(self.tauRec) # validate type Nml2Quantity_time
value = find_attr_value_('initReleaseProb', node)
if value is not None and 'initReleaseProb' not in already_processed:
already_processed.add('initReleaseProb')
try:
self.initReleaseProb = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (initReleaseProb): %s' % exp)
self.validate_ZeroToOne(self.initReleaseProb) # validate type ZeroToOne
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class PlasticityMechanism
class SegmentParent(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, fractionAlong='1', segment=None):
self.fractionAlong = _cast(None, fractionAlong)
self.segment = _cast(None, segment)
pass
def factory(*args_, **kwargs_):
if SegmentParent.subclass:
return SegmentParent.subclass(*args_, **kwargs_)
else:
return SegmentParent(*args_, **kwargs_)
factory = staticmethod(factory)
def get_fractionAlong(self): return self.fractionAlong
def set_fractionAlong(self, fractionAlong): self.fractionAlong = fractionAlong
def validate_ZeroToOne(self, value):
# Validate type ZeroToOne, a restriction on xs:double.
pass
def get_segment(self): return self.segment
def set_segment(self, segment): self.segment = segment
def validate_SegmentId(self, value):
# Validate type SegmentId, a restriction on xs:nonNegativeInteger.
pass
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SegmentParent', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SegmentParent')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SegmentParent'):
if self.fractionAlong is not None and 'fractionAlong' not in already_processed:
already_processed.add('fractionAlong')
outfile.write(' fractionAlong=%s' % (quote_attrib(self.fractionAlong), ))
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
outfile.write(' segment=%s' % (quote_attrib(self.segment), ))
def exportChildren(self, outfile, level, namespace_='', name_='SegmentParent', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='SegmentParent'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.fractionAlong is not None and 'fractionAlong' not in already_processed:
already_processed.add('fractionAlong')
showIndent(outfile, level)
outfile.write('fractionAlong=%e,\n' % (self.fractionAlong,))
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
showIndent(outfile, level)
outfile.write('segment=%d,\n' % (self.segment,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('fractionAlong', node)
if value is not None and 'fractionAlong' not in already_processed:
already_processed.add('fractionAlong')
try:
self.fractionAlong = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (fractionAlong): %s' % exp)
self.validate_ZeroToOne(self.fractionAlong) # validate type ZeroToOne
value = find_attr_value_('segment', node)
if value is not None and 'segment' not in already_processed:
already_processed.add('segment')
try:
self.segment = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.segment < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
self.validate_SegmentId(self.segment) # validate type SegmentId
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class SegmentParent
class Point3DWithDiam(GeneratedsSuper):
"""A 3D point with diameter."""
subclass = None
superclass = None
def __init__(self, y=None, x=None, z=None, diameter=None):
self.y = _cast(float, y)
self.x = _cast(float, x)
self.z = _cast(float, z)
self.diameter = _cast(float, diameter)
pass
def factory(*args_, **kwargs_):
if Point3DWithDiam.subclass:
return Point3DWithDiam.subclass(*args_, **kwargs_)
else:
return Point3DWithDiam(*args_, **kwargs_)
factory = staticmethod(factory)
def get_y(self): return self.y
def set_y(self, y): self.y = y
def get_x(self): return self.x
def set_x(self, x): self.x = x
def get_z(self): return self.z
def set_z(self, z): self.z = z
def get_diameter(self): return self.diameter
def set_diameter(self, diameter): self.diameter = diameter
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Point3DWithDiam', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Point3DWithDiam')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Point3DWithDiam'):
if self.y is not None and 'y' not in already_processed:
already_processed.add('y')
outfile.write(' y="%s"' % self.gds_format_double(self.y, input_name='y'))
if self.x is not None and 'x' not in already_processed:
already_processed.add('x')
outfile.write(' x="%s"' % self.gds_format_double(self.x, input_name='x'))
if self.z is not None and 'z' not in already_processed:
already_processed.add('z')
outfile.write(' z="%s"' % self.gds_format_double(self.z, input_name='z'))
if self.diameter is not None and 'diameter' not in already_processed:
already_processed.add('diameter')
outfile.write(' diameter="%s"' % self.gds_format_double(self.diameter, input_name='diameter'))
def exportChildren(self, outfile, level, namespace_='', name_='Point3DWithDiam', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='Point3DWithDiam'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.y is not None and 'y' not in already_processed:
already_processed.add('y')
showIndent(outfile, level)
outfile.write('y=%e,\n' % (self.y,))
if self.x is not None and 'x' not in already_processed:
already_processed.add('x')
showIndent(outfile, level)
outfile.write('x=%e,\n' % (self.x,))
if self.z is not None and 'z' not in already_processed:
already_processed.add('z')
showIndent(outfile, level)
outfile.write('z=%e,\n' % (self.z,))
if self.diameter is not None and 'diameter' not in already_processed:
already_processed.add('diameter')
showIndent(outfile, level)
outfile.write('diameter=%e,\n' % (self.diameter,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('y', node)
if value is not None and 'y' not in already_processed:
already_processed.add('y')
try:
self.y = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (y): %s' % exp)
value = find_attr_value_('x', node)
if value is not None and 'x' not in already_processed:
already_processed.add('x')
try:
self.x = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (x): %s' % exp)
value = find_attr_value_('z', node)
if value is not None and 'z' not in already_processed:
already_processed.add('z')
try:
self.z = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (z): %s' % exp)
value = find_attr_value_('diameter', node)
if value is not None and 'diameter' not in already_processed:
already_processed.add('diameter')
try:
self.diameter = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (diameter): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Point3DWithDiam
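# Illustrative sketch, not part of the generated bindings: unlike the quantity-string
# classes above, Point3DWithDiam stores x/y/z/diameter as plain doubles, so values can
# be passed as Python floats and are written back with gds_format_double(). The
# coordinates below are arbitrary. The function is only defined, never called.
def _example_export_point3d():  # documentation sketch only
    import sys
    distal = Point3DWithDiam(x=0.0, y=0.0, z=10.0, diameter=2.0)
    distal.export(sys.stdout, 0, name_='distal')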
class ProximalDetails(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, translationStart=None):
self.translationStart = _cast(float, translationStart)
pass
def factory(*args_, **kwargs_):
if ProximalDetails.subclass:
return ProximalDetails.subclass(*args_, **kwargs_)
else:
return ProximalDetails(*args_, **kwargs_)
factory = staticmethod(factory)
def get_translationStart(self): return self.translationStart
def set_translationStart(self, translationStart): self.translationStart = translationStart
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ProximalDetails', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ProximalDetails')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ProximalDetails'):
if self.translationStart is not None and 'translationStart' not in already_processed:
already_processed.add('translationStart')
outfile.write(' translationStart="%s"' % self.gds_format_double(self.translationStart, input_name='translationStart'))
def exportChildren(self, outfile, level, namespace_='', name_='ProximalDetails', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='ProximalDetails'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.translationStart is not None and 'translationStart' not in already_processed:
already_processed.add('translationStart')
showIndent(outfile, level)
outfile.write('translationStart=%e,\n' % (self.translationStart,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('translationStart', node)
if value is not None and 'translationStart' not in already_processed:
already_processed.add('translationStart')
try:
self.translationStart = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (translationStart): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ProximalDetails
class DistalDetails(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, normalizationEnd=None):
self.normalizationEnd = _cast(float, normalizationEnd)
pass
def factory(*args_, **kwargs_):
if DistalDetails.subclass:
return DistalDetails.subclass(*args_, **kwargs_)
else:
return DistalDetails(*args_, **kwargs_)
factory = staticmethod(factory)
def get_normalizationEnd(self): return self.normalizationEnd
def set_normalizationEnd(self, normalizationEnd): self.normalizationEnd = normalizationEnd
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='DistalDetails', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DistalDetails')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DistalDetails'):
if self.normalizationEnd is not None and 'normalizationEnd' not in already_processed:
already_processed.add('normalizationEnd')
outfile.write(' normalizationEnd="%s"' % self.gds_format_double(self.normalizationEnd, input_name='normalizationEnd'))
def exportChildren(self, outfile, level, namespace_='', name_='DistalDetails', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='DistalDetails'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.normalizationEnd is not None and 'normalizationEnd' not in already_processed:
already_processed.add('normalizationEnd')
showIndent(outfile, level)
outfile.write('normalizationEnd=%e,\n' % (self.normalizationEnd,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('normalizationEnd', node)
if value is not None and 'normalizationEnd' not in already_processed:
already_processed.add('normalizationEnd')
try:
self.normalizationEnd = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (normalizationEnd): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class DistalDetails
class Member(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, segment=None):
self.segment = _cast(None, segment)
pass
def factory(*args_, **kwargs_):
if Member.subclass:
return Member.subclass(*args_, **kwargs_)
else:
return Member(*args_, **kwargs_)
factory = staticmethod(factory)
def get_segment(self): return self.segment
def set_segment(self, segment): self.segment = segment
def validate_SegmentId(self, value):
# Validate type SegmentId, a restriction on xs:nonNegativeInteger.
pass
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Member', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Member')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Member'):
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
outfile.write(' segment=%s' % (quote_attrib(self.segment), ))
def exportChildren(self, outfile, level, namespace_='', name_='Member', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='Member'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
showIndent(outfile, level)
outfile.write('segment=%d,\n' % (self.segment,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('segment', node)
if value is not None and 'segment' not in already_processed:
already_processed.add('segment')
try:
self.segment = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.segment < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
self.validate_SegmentId(self.segment) # validate type SegmentId
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Member
class Include(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, segmentGroup=None):
self.segmentGroup = _cast(None, segmentGroup)
pass
def factory(*args_, **kwargs_):
if Include.subclass:
return Include.subclass(*args_, **kwargs_)
else:
return Include(*args_, **kwargs_)
factory = staticmethod(factory)
def get_segmentGroup(self): return self.segmentGroup
def set_segmentGroup(self, segmentGroup): self.segmentGroup = segmentGroup
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Include', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Include')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Include'):
if self.segmentGroup is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
outfile.write(' segmentGroup=%s' % (quote_attrib(self.segmentGroup), ))
def exportChildren(self, outfile, level, namespace_='', name_='Include', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='Include'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.segmentGroup is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
showIndent(outfile, level)
outfile.write('segmentGroup="%s",\n' % (self.segmentGroup,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('segmentGroup', node)
if value is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
self.segmentGroup = value
self.validate_NmlId(self.segmentGroup) # validate type NmlId
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Include
class Path(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, fromxx=None, to=None):
self.fromxx = fromxx
self.to = to
def factory(*args_, **kwargs_):
if Path.subclass:
return Path.subclass(*args_, **kwargs_)
else:
return Path(*args_, **kwargs_)
factory = staticmethod(factory)
def get_from(self): return self.fromxx
def set_from(self, fromxx): self.fromxx = fromxx
def get_to(self): return self.to
def set_to(self, to): self.to = to
def hasContent_(self):
if (
self.fromxx is not None or
self.to is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Path', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Path')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Path'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='Path', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.fromxx is not None:
self.fromxx.export(outfile, level, namespace_, name_='from', pretty_print=pretty_print)
if self.to is not None:
self.to.export(outfile, level, namespace_, name_='to', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Path'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.fromxx is not None:
showIndent(outfile, level)
outfile.write('fromxx=model_.SegmentEndPoint(\n')
self.fromxx.exportLiteral(outfile, level, name_='from')
showIndent(outfile, level)
outfile.write('),\n')
if self.to is not None:
showIndent(outfile, level)
outfile.write('to=model_.SegmentEndPoint(\n')
self.to.exportLiteral(outfile, level, name_='to')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'from':
obj_ = SegmentEndPoint.factory()
obj_.build(child_)
self.set_from(obj_)
elif nodeName_ == 'to':
obj_ = SegmentEndPoint.factory()
obj_.build(child_)
self.set_to(obj_)
# end class Path
class SubTree(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, fromxx=None, to=None):
self.fromxx = fromxx
self.to = to
def factory(*args_, **kwargs_):
if SubTree.subclass:
return SubTree.subclass(*args_, **kwargs_)
else:
return SubTree(*args_, **kwargs_)
factory = staticmethod(factory)
def get_from(self): return self.fromxx
def set_from(self, fromxx): self.fromxx = fromxx
def get_to(self): return self.to
def set_to(self, to): self.to = to
def hasContent_(self):
if (
self.fromxx is not None or
self.to is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SubTree', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SubTree')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SubTree'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='SubTree', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.fromxx is not None:
self.fromxx.export(outfile, level, namespace_, name_='from', pretty_print=pretty_print)
if self.to is not None:
self.to.export(outfile, level, namespace_, name_='to', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='SubTree'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.fromxx is not None:
showIndent(outfile, level)
outfile.write('fromxx=model_.SegmentEndPoint(\n')
self.fromxx.exportLiteral(outfile, level, name_='from')
showIndent(outfile, level)
outfile.write('),\n')
if self.to is not None:
showIndent(outfile, level)
outfile.write('to=model_.SegmentEndPoint(\n')
self.to.exportLiteral(outfile, level, name_='to')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'from':
obj_ = SegmentEndPoint.factory()
obj_.build(child_)
self.set_from(obj_)
elif nodeName_ == 'to':
obj_ = SegmentEndPoint.factory()
obj_.build(child_)
self.set_to(obj_)
# end class SubTree
class SegmentEndPoint(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, segment=None):
self.segment = _cast(None, segment)
pass
def factory(*args_, **kwargs_):
if SegmentEndPoint.subclass:
return SegmentEndPoint.subclass(*args_, **kwargs_)
else:
return SegmentEndPoint(*args_, **kwargs_)
factory = staticmethod(factory)
def get_segment(self): return self.segment
def set_segment(self, segment): self.segment = segment
def validate_SegmentId(self, value):
# Validate type SegmentId, a restriction on xs:nonNegativeInteger.
pass
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SegmentEndPoint', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SegmentEndPoint')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SegmentEndPoint'):
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
outfile.write(' segment=%s' % (quote_attrib(self.segment), ))
def exportChildren(self, outfile, level, namespace_='', name_='SegmentEndPoint', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='SegmentEndPoint'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
showIndent(outfile, level)
outfile.write('segment=%d,\n' % (self.segment,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('segment', node)
if value is not None and 'segment' not in already_processed:
already_processed.add('segment')
try:
self.segment = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.segment < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
self.validate_SegmentId(self.segment) # validate type SegmentId
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class SegmentEndPoint
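# Illustrative sketch, not part of the generated bindings: Path (like the similar
# SubTree) holds child elements rather than attributes, so this example wires two
# hypothetical SegmentEndPoint children into a Path via the generated setters before
# exporting the nested structure. The function is only defined, never called.
def _example_export_path():  # documentation sketch only
    import sys
    path = Path()
    path.set_from(SegmentEndPoint(segment=0))
    path.set_to(SegmentEndPoint(segment=5))
    path.export(sys.stdout, 0, name_='path')  # children appear as <from .../> and <to .../>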
class MembraneProperties(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, channelPopulation=None, channelDensity=None, spikeThresh=None, specificCapacitance=None, initMembPotential=None, reversalPotential=None):
if channelPopulation is None:
self.channelPopulation = []
else:
self.channelPopulation = channelPopulation
if channelDensity is None:
self.channelDensity = []
else:
self.channelDensity = channelDensity
if spikeThresh is None:
self.spikeThresh = []
else:
self.spikeThresh = spikeThresh
if specificCapacitance is None:
self.specificCapacitance = []
else:
self.specificCapacitance = specificCapacitance
if initMembPotential is None:
self.initMembPotential = []
else:
self.initMembPotential = initMembPotential
if reversalPotential is None:
self.reversalPotential = []
else:
self.reversalPotential = reversalPotential
def factory(*args_, **kwargs_):
if MembraneProperties.subclass:
return MembraneProperties.subclass(*args_, **kwargs_)
else:
return MembraneProperties(*args_, **kwargs_)
factory = staticmethod(factory)
def get_channelPopulation(self): return self.channelPopulation
def set_channelPopulation(self, channelPopulation): self.channelPopulation = channelPopulation
def add_channelPopulation(self, value): self.channelPopulation.append(value)
def insert_channelPopulation(self, index, value): self.channelPopulation[index] = value
def get_channelDensity(self): return self.channelDensity
def set_channelDensity(self, channelDensity): self.channelDensity = channelDensity
def add_channelDensity(self, value): self.channelDensity.append(value)
def insert_channelDensity(self, index, value): self.channelDensity[index] = value
def get_spikeThresh(self): return self.spikeThresh
def set_spikeThresh(self, spikeThresh): self.spikeThresh = spikeThresh
def add_spikeThresh(self, value): self.spikeThresh.append(value)
def insert_spikeThresh(self, index, value): self.spikeThresh[index] = value
def get_specificCapacitance(self): return self.specificCapacitance
def set_specificCapacitance(self, specificCapacitance): self.specificCapacitance = specificCapacitance
def add_specificCapacitance(self, value): self.specificCapacitance.append(value)
def insert_specificCapacitance(self, index, value): self.specificCapacitance[index] = value
def get_initMembPotential(self): return self.initMembPotential
def set_initMembPotential(self, initMembPotential): self.initMembPotential = initMembPotential
def add_initMembPotential(self, value): self.initMembPotential.append(value)
def insert_initMembPotential(self, index, value): self.initMembPotential[index] = value
def get_reversalPotential(self): return self.reversalPotential
def set_reversalPotential(self, reversalPotential): self.reversalPotential = reversalPotential
def add_reversalPotential(self, value): self.reversalPotential.append(value)
def insert_reversalPotential(self, index, value): self.reversalPotential[index] = value
def hasContent_(self):
if (
self.channelPopulation or
self.channelDensity or
self.spikeThresh or
self.specificCapacitance or
self.initMembPotential or
self.reversalPotential
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='MembraneProperties', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='MembraneProperties')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MembraneProperties'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='MembraneProperties', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for channelPopulation_ in self.channelPopulation:
channelPopulation_.export(outfile, level, namespace_, name_='channelPopulation', pretty_print=pretty_print)
for channelDensity_ in self.channelDensity:
channelDensity_.export(outfile, level, namespace_, name_='channelDensity', pretty_print=pretty_print)
for spikeThresh_ in self.spikeThresh:
spikeThresh_.export(outfile, level, namespace_, name_='spikeThresh', pretty_print=pretty_print)
for specificCapacitance_ in self.specificCapacitance:
specificCapacitance_.export(outfile, level, namespace_, name_='specificCapacitance', pretty_print=pretty_print)
for initMembPotential_ in self.initMembPotential:
initMembPotential_.export(outfile, level, namespace_, name_='initMembPotential', pretty_print=pretty_print)
for reversalPotential_ in self.reversalPotential:
reversalPotential_.export(outfile, level, namespace_, name_='reversalPotential', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='MembraneProperties'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('channelPopulation=[\n')
level += 1
for channelPopulation_ in self.channelPopulation:
showIndent(outfile, level)
outfile.write('model_.ChannelPopulation(\n')
channelPopulation_.exportLiteral(outfile, level, name_='ChannelPopulation')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('channelDensity=[\n')
level += 1
for channelDensity_ in self.channelDensity:
showIndent(outfile, level)
outfile.write('model_.ChannelDensity(\n')
channelDensity_.exportLiteral(outfile, level, name_='ChannelDensity')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('spikeThresh=[\n')
level += 1
for spikeThresh_ in self.spikeThresh:
showIndent(outfile, level)
outfile.write('model_.ValueAcrossSegOrSegGroup(\n')
spikeThresh_.exportLiteral(outfile, level, name_='ValueAcrossSegOrSegGroup')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('specificCapacitance=[\n')
level += 1
for specificCapacitance_ in self.specificCapacitance:
showIndent(outfile, level)
outfile.write('model_.ValueAcrossSegOrSegGroup(\n')
specificCapacitance_.exportLiteral(outfile, level, name_='ValueAcrossSegOrSegGroup')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('initMembPotential=[\n')
level += 1
for initMembPotential_ in self.initMembPotential:
showIndent(outfile, level)
outfile.write('model_.ValueAcrossSegOrSegGroup(\n')
initMembPotential_.exportLiteral(outfile, level, name_='ValueAcrossSegOrSegGroup')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('reversalPotential=[\n')
level += 1
for reversalPotential_ in self.reversalPotential:
showIndent(outfile, level)
outfile.write('model_.ReversalPotential(\n')
reversalPotential_.exportLiteral(outfile, level, name_='ReversalPotential')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'channelPopulation':
obj_ = ChannelPopulation.factory()
obj_.build(child_)
self.channelPopulation.append(obj_)
elif nodeName_ == 'channelDensity':
obj_ = ChannelDensity.factory()
obj_.build(child_)
self.channelDensity.append(obj_)
elif nodeName_ == 'spikeThresh':
class_obj_ = self.get_class_obj_(child_, ValueAcrossSegOrSegGroup)
obj_ = class_obj_.factory()
obj_.build(child_)
self.spikeThresh.append(obj_)
elif nodeName_ == 'specificCapacitance':
class_obj_ = self.get_class_obj_(child_, ValueAcrossSegOrSegGroup)
obj_ = class_obj_.factory()
obj_.build(child_)
self.specificCapacitance.append(obj_)
elif nodeName_ == 'initMembPotential':
class_obj_ = self.get_class_obj_(child_, ValueAcrossSegOrSegGroup)
obj_ = class_obj_.factory()
obj_.build(child_)
self.initMembPotential.append(obj_)
elif nodeName_ == 'reversalPotential':
obj_ = ReversalPotential.factory()
obj_.build(child_)
self.reversalPotential.append(obj_)
# end class MembraneProperties
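# Hedged usage sketch (illustrative only, not part of the generated bindings):
# MembraneProperties just aggregates repeated child elements; every child kind
# gets the same get_/set_/add_/insert_ quartet and exportChildren() walks each
# list in turn.  The values below ('1.0 uF_per_cm2', '-65mV') are invented
# placeholders, and ValueAcrossSegOrSegGroup is defined a little further down
# (the name is only resolved when the function is called).
def _example_membrane_properties():  # never called by the generated code
    import sys
    props = MembraneProperties.factory()
    props.add_specificCapacitance(
        ValueAcrossSegOrSegGroup.factory(value='1.0 uF_per_cm2'))
    props.add_initMembPotential(
        ValueAcrossSegOrSegGroup.factory(value='-65mV'))
    props.export(sys.stdout, 0, name_='membraneProperties')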
class ValueAcrossSegOrSegGroup(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, segment=None, segmentGroup='all', value=None, extensiontype_=None):
self.segment = _cast(None, segment)
self.segmentGroup = _cast(None, segmentGroup)
self.value = _cast(None, value)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if ValueAcrossSegOrSegGroup.subclass:
return ValueAcrossSegOrSegGroup.subclass(*args_, **kwargs_)
else:
return ValueAcrossSegOrSegGroup(*args_, **kwargs_)
factory = staticmethod(factory)
def get_segment(self): return self.segment
def set_segment(self, segment): self.segment = segment
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_segmentGroup(self): return self.segmentGroup
def set_segmentGroup(self, segmentGroup): self.segmentGroup = segmentGroup
def get_value(self): return self.value
def set_value(self, value): self.value = value
def validate_Nml2Quantity(self, value):
# Validate type Nml2Quantity, a restriction on xs:string.
pass
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ValueAcrossSegOrSegGroup', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ValueAcrossSegOrSegGroup')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ValueAcrossSegOrSegGroup'):
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
outfile.write(' segment=%s' % (quote_attrib(self.segment), ))
if self.segmentGroup is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
outfile.write(' segmentGroup=%s' % (quote_attrib(self.segmentGroup), ))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (quote_attrib(self.value), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='ValueAcrossSegOrSegGroup', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='ValueAcrossSegOrSegGroup'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
showIndent(outfile, level)
outfile.write('segment="%s",\n' % (self.segment,))
if self.segmentGroup is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
showIndent(outfile, level)
outfile.write('segmentGroup="%s",\n' % (self.segmentGroup,))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
showIndent(outfile, level)
outfile.write('value="%s",\n' % (self.value,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('segment', node)
if value is not None and 'segment' not in already_processed:
already_processed.add('segment')
self.segment = value
self.validate_NmlId(self.segment) # validate type NmlId
value = find_attr_value_('segmentGroup', node)
if value is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
self.segmentGroup = value
self.validate_NmlId(self.segmentGroup) # validate type NmlId
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
self.validate_Nml2Quantity(self.value) # validate type Nml2Quantity
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ValueAcrossSegOrSegGroup
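# Hedged usage sketch (illustrative only, not part of the generated bindings):
# ValueAcrossSegOrSegGroup is the polymorphic base used for spikeThresh,
# specificCapacitance, initMembPotential and resistivity children: on export an
# optional extensiontype_ is written as xsi:type, and on parse the containers
# call get_class_obj_() so that an xsi:type attribute can select a subclass.
# The sketch below parses a bare element; it assumes lxml is installed (the
# module's own XML helpers such as find_attr_value_ are typically generated
# against lxml) and the attribute values are invented.
def _example_parse_value_across_seg():  # never called by the generated code
    from lxml import etree  # assumption: lxml is available
    node = etree.fromstring(
        '<specificCapacitance value="1.0 uF_per_cm2" segmentGroup="soma_group"/>')
    obj = ValueAcrossSegOrSegGroup.factory()
    obj.build(node)
    return obj.get_value(), obj.get_segmentGroup()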
class VariableParameter(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, segmentGroup=None, parameter=None, inhomogeneousValue=None):
self.segmentGroup = _cast(None, segmentGroup)
self.parameter = _cast(None, parameter)
self.inhomogeneousValue = inhomogeneousValue
def factory(*args_, **kwargs_):
if VariableParameter.subclass:
return VariableParameter.subclass(*args_, **kwargs_)
else:
return VariableParameter(*args_, **kwargs_)
factory = staticmethod(factory)
def get_inhomogeneousValue(self): return self.inhomogeneousValue
def set_inhomogeneousValue(self, inhomogeneousValue): self.inhomogeneousValue = inhomogeneousValue
def get_segmentGroup(self): return self.segmentGroup
def set_segmentGroup(self, segmentGroup): self.segmentGroup = segmentGroup
def get_parameter(self): return self.parameter
def set_parameter(self, parameter): self.parameter = parameter
def hasContent_(self):
if (
self.inhomogeneousValue is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='VariableParameter', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='VariableParameter')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VariableParameter'):
if self.segmentGroup is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
outfile.write(' segmentGroup=%s' % (self.gds_format_string(quote_attrib(self.segmentGroup).encode(ExternalEncoding), input_name='segmentGroup'), ))
if self.parameter is not None and 'parameter' not in already_processed:
already_processed.add('parameter')
outfile.write(' parameter=%s' % (self.gds_format_string(quote_attrib(self.parameter).encode(ExternalEncoding), input_name='parameter'), ))
def exportChildren(self, outfile, level, namespace_='', name_='VariableParameter', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.inhomogeneousValue is not None:
self.inhomogeneousValue.export(outfile, level, namespace_, name_='inhomogeneousValue', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='VariableParameter'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.segmentGroup is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
showIndent(outfile, level)
outfile.write('segmentGroup="%s",\n' % (self.segmentGroup,))
if self.parameter is not None and 'parameter' not in already_processed:
already_processed.add('parameter')
showIndent(outfile, level)
outfile.write('parameter="%s",\n' % (self.parameter,))
def exportLiteralChildren(self, outfile, level, name_):
if self.inhomogeneousValue is not None:
showIndent(outfile, level)
outfile.write('inhomogeneousValue=model_.InhomogeneousValue(\n')
self.inhomogeneousValue.exportLiteral(outfile, level, name_='inhomogeneousValue')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('segmentGroup', node)
if value is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
self.segmentGroup = value
value = find_attr_value_('parameter', node)
if value is not None and 'parameter' not in already_processed:
already_processed.add('parameter')
self.parameter = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'inhomogeneousValue':
obj_ = InhomogeneousValue.factory()
obj_.build(child_)
self.set_inhomogeneousValue(obj_)
# end class VariableParameter
class InhomogeneousValue(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, inhomogeneousParam=None, value=None):
self.inhomogeneousParam = _cast(None, inhomogeneousParam)
self.value = _cast(None, value)
pass
def factory(*args_, **kwargs_):
if InhomogeneousValue.subclass:
return InhomogeneousValue.subclass(*args_, **kwargs_)
else:
return InhomogeneousValue(*args_, **kwargs_)
factory = staticmethod(factory)
def get_inhomogeneousParam(self): return self.inhomogeneousParam
def set_inhomogeneousParam(self, inhomogeneousParam): self.inhomogeneousParam = inhomogeneousParam
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='InhomogeneousValue', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='InhomogeneousValue')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InhomogeneousValue'):
if self.inhomogeneousParam is not None and 'inhomogeneousParam' not in already_processed:
already_processed.add('inhomogeneousParam')
outfile.write(' inhomogeneousParam=%s' % (self.gds_format_string(quote_attrib(self.inhomogeneousParam).encode(ExternalEncoding), input_name='inhomogeneousParam'), ))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
def exportChildren(self, outfile, level, namespace_='', name_='InhomogeneousValue', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='InhomogeneousValue'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.inhomogeneousParam is not None and 'inhomogeneousParam' not in already_processed:
already_processed.add('inhomogeneousParam')
showIndent(outfile, level)
outfile.write('inhomogeneousParam="%s",\n' % (self.inhomogeneousParam,))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
showIndent(outfile, level)
outfile.write('value="%s",\n' % (self.value,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('inhomogeneousParam', node)
if value is not None and 'inhomogeneousParam' not in already_processed:
already_processed.add('inhomogeneousParam')
self.inhomogeneousParam = value
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class InhomogeneousValue
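# Hedged usage sketch (illustrative only, not part of the generated bindings):
# VariableParameter carries at most one inhomogeneousValue child through a
# plain get_/set_ pair (not the list pattern used elsewhere), so hasContent_()
# is true exactly when that child is present.  The parameter name, group id
# and value expression below are invented placeholders.
def _example_variable_parameter():  # never called by the generated code
    import sys
    vp = VariableParameter.factory(segmentGroup='dendrite_group',
                                   parameter='condDensity')
    vp.set_inhomogeneousValue(
        InhomogeneousValue.factory(inhomogeneousParam='p_exp',
                                   value='5e-7 * exp(-p/200)'))
    vp.export(sys.stdout, 0, name_='variableParameter')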
class ReversalPotential(ValueAcrossSegOrSegGroup):
subclass = None
superclass = ValueAcrossSegOrSegGroup
def __init__(self, segment=None, segmentGroup='all', value=None, species=None):
super(ReversalPotential, self).__init__(segment, segmentGroup, value, )
self.species = _cast(None, species)
pass
def factory(*args_, **kwargs_):
if ReversalPotential.subclass:
return ReversalPotential.subclass(*args_, **kwargs_)
else:
return ReversalPotential(*args_, **kwargs_)
factory = staticmethod(factory)
def get_species(self): return self.species
def set_species(self, species): self.species = species
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(ReversalPotential, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ReversalPotential', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ReversalPotential')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReversalPotential'):
super(ReversalPotential, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ReversalPotential')
if self.species is not None and 'species' not in already_processed:
already_processed.add('species')
outfile.write(' species=%s' % (quote_attrib(self.species), ))
def exportChildren(self, outfile, level, namespace_='', name_='ReversalPotential', fromsubclass_=False, pretty_print=True):
super(ReversalPotential, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='ReversalPotential'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.species is not None and 'species' not in already_processed:
already_processed.add('species')
showIndent(outfile, level)
outfile.write('species="%s",\n' % (self.species,))
super(ReversalPotential, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ReversalPotential, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('species', node)
if value is not None and 'species' not in already_processed:
already_processed.add('species')
self.species = value
self.validate_NmlId(self.species) # validate type NmlId
super(ReversalPotential, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(ReversalPotential, self).buildChildren(child_, node, nodeName_, True)
pass
# end class ReversalPotential
class Species(ValueAcrossSegOrSegGroup):
"""Specifying the ion here again is redundant, the ion name should be
the same as id. Kept for now until LEMS implementation can
select by id. TODO: remove."""
subclass = None
superclass = ValueAcrossSegOrSegGroup
def __init__(self, segment=None, segmentGroup='all', value=None, ion=None, initialExtConcentration=None, concentrationModel=None, id=None, initialConcentration=None):
super(Species, self).__init__(segment, segmentGroup, value, )
self.ion = _cast(None, ion)
self.initialExtConcentration = _cast(None, initialExtConcentration)
self.concentrationModel = _cast(None, concentrationModel)
self.id = _cast(None, id)
self.initialConcentration = _cast(None, initialConcentration)
pass
def factory(*args_, **kwargs_):
if Species.subclass:
return Species.subclass(*args_, **kwargs_)
else:
return Species(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ion(self): return self.ion
def set_ion(self, ion): self.ion = ion
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_initialExtConcentration(self): return self.initialExtConcentration
def set_initialExtConcentration(self, initialExtConcentration): self.initialExtConcentration = initialExtConcentration
def validate_Nml2Quantity_concentration(self, value):
# Validate type Nml2Quantity_concentration, a restriction on xs:string.
pass
def get_concentrationModel(self): return self.concentrationModel
def set_concentrationModel(self, concentrationModel): self.concentrationModel = concentrationModel
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_initialConcentration(self): return self.initialConcentration
def set_initialConcentration(self, initialConcentration): self.initialConcentration = initialConcentration
def hasContent_(self):
if (
super(Species, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Species', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Species')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Species'):
super(Species, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Species')
if self.ion is not None and 'ion' not in already_processed:
already_processed.add('ion')
outfile.write(' ion=%s' % (quote_attrib(self.ion), ))
if self.initialExtConcentration is not None and 'initialExtConcentration' not in already_processed:
already_processed.add('initialExtConcentration')
outfile.write(' initialExtConcentration=%s' % (quote_attrib(self.initialExtConcentration), ))
if self.concentrationModel is not None and 'concentrationModel' not in already_processed:
already_processed.add('concentrationModel')
outfile.write(' concentrationModel=%s' % (quote_attrib(self.concentrationModel), ))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (quote_attrib(self.id), ))
if self.initialConcentration is not None and 'initialConcentration' not in already_processed:
already_processed.add('initialConcentration')
outfile.write(' initialConcentration=%s' % (quote_attrib(self.initialConcentration), ))
def exportChildren(self, outfile, level, namespace_='', name_='Species', fromsubclass_=False, pretty_print=True):
super(Species, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='Species'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.ion is not None and 'ion' not in already_processed:
already_processed.add('ion')
showIndent(outfile, level)
outfile.write('ion="%s",\n' % (self.ion,))
if self.initialExtConcentration is not None and 'initialExtConcentration' not in already_processed:
already_processed.add('initialExtConcentration')
showIndent(outfile, level)
outfile.write('initialExtConcentration="%s",\n' % (self.initialExtConcentration,))
if self.concentrationModel is not None and 'concentrationModel' not in already_processed:
already_processed.add('concentrationModel')
showIndent(outfile, level)
outfile.write('concentrationModel="%s",\n' % (self.concentrationModel,))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
showIndent(outfile, level)
outfile.write('id="%s",\n' % (self.id,))
if self.initialConcentration is not None and 'initialConcentration' not in already_processed:
already_processed.add('initialConcentration')
showIndent(outfile, level)
outfile.write('initialConcentration="%s",\n' % (self.initialConcentration,))
super(Species, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Species, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('ion', node)
if value is not None and 'ion' not in already_processed:
already_processed.add('ion')
self.ion = value
self.validate_NmlId(self.ion) # validate type NmlId
value = find_attr_value_('initialExtConcentration', node)
if value is not None and 'initialExtConcentration' not in already_processed:
already_processed.add('initialExtConcentration')
self.initialExtConcentration = value
self.validate_Nml2Quantity_concentration(self.initialExtConcentration) # validate type Nml2Quantity_concentration
value = find_attr_value_('concentrationModel', node)
if value is not None and 'concentrationModel' not in already_processed:
already_processed.add('concentrationModel')
self.concentrationModel = value
self.validate_NmlId(self.concentrationModel) # validate type NmlId
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
self.validate_NmlId(self.id) # validate type NmlId
value = find_attr_value_('initialConcentration', node)
if value is not None and 'initialConcentration' not in already_processed:
already_processed.add('initialConcentration')
self.initialConcentration = value
self.validate_Nml2Quantity_concentration(self.initialConcentration) # validate type Nml2Quantity_concentration
super(Species, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(Species, self).buildChildren(child_, node, nodeName_, True)
pass
# end class Species
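# Hedged usage sketch (illustrative only, not part of the generated bindings):
# Species extends ValueAcrossSegOrSegGroup with ion/concentration attributes;
# as the docstring above notes, id and ion are expected to name the same ion.
# Every attribute value here ('ca', 'CaDynamics', the concentrations) is an
# invented placeholder.
def _example_species():  # never called by the generated code
    import sys
    ca = Species.factory(id='ca', ion='ca', concentrationModel='CaDynamics',
                         initialConcentration='1e-4 mM',
                         initialExtConcentration='2 mM')
    ca.export(sys.stdout, 0, name_='species')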
class IntracellularProperties(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, species=None, resistivity=None):
if species is None:
self.species = []
else:
self.species = species
if resistivity is None:
self.resistivity = []
else:
self.resistivity = resistivity
def factory(*args_, **kwargs_):
if IntracellularProperties.subclass:
return IntracellularProperties.subclass(*args_, **kwargs_)
else:
return IntracellularProperties(*args_, **kwargs_)
factory = staticmethod(factory)
def get_species(self): return self.species
def set_species(self, species): self.species = species
def add_species(self, value): self.species.append(value)
def insert_species(self, index, value): self.species[index] = value
def get_resistivity(self): return self.resistivity
def set_resistivity(self, resistivity): self.resistivity = resistivity
def add_resistivity(self, value): self.resistivity.append(value)
def insert_resistivity(self, index, value): self.resistivity[index] = value
def hasContent_(self):
if (
self.species or
self.resistivity
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IntracellularProperties', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IntracellularProperties')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IntracellularProperties'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='IntracellularProperties', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for species_ in self.species:
species_.export(outfile, level, namespace_, name_='species', pretty_print=pretty_print)
for resistivity_ in self.resistivity:
resistivity_.export(outfile, level, namespace_, name_='resistivity', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IntracellularProperties'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('species=[\n')
level += 1
for species_ in self.species:
showIndent(outfile, level)
outfile.write('model_.Species(\n')
species_.exportLiteral(outfile, level, name_='Species')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('resistivity=[\n')
level += 1
for resistivity_ in self.resistivity:
showIndent(outfile, level)
outfile.write('model_.ValueAcrossSegOrSegGroup(\n')
resistivity_.exportLiteral(outfile, level, name_='ValueAcrossSegOrSegGroup')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'species':
obj_ = Species.factory()
obj_.build(child_)
self.species.append(obj_)
elif nodeName_ == 'resistivity':
class_obj_ = self.get_class_obj_(child_, ValueAcrossSegOrSegGroup)
obj_ = class_obj_.factory()
obj_.build(child_)
self.resistivity.append(obj_)
# end class IntracellularProperties
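# Hedged usage sketch (illustrative only, not part of the generated bindings):
# IntracellularProperties groups species and resistivity children; on parse the
# resistivity entries go through get_class_obj_(), so an xsi:type attribute can
# substitute a ValueAcrossSegOrSegGroup subclass.  All values below are
# invented placeholders.
def _example_intracellular_properties():  # never called by the generated code
    import sys
    ip = IntracellularProperties.factory()
    ip.add_species(Species.factory(id='ca', ion='ca',
                                   initialConcentration='1e-4 mM'))
    ip.add_resistivity(ValueAcrossSegOrSegGroup.factory(value='0.1 kohm_cm'))
    ip.export(sys.stdout, 0, name_='intracellularProperties')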
class ExtracellularPropertiesLocal(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, temperature=None, species=None):
self.temperature = _cast(None, temperature)
if species is None:
self.species = []
else:
self.species = species
def factory(*args_, **kwargs_):
if ExtracellularPropertiesLocal.subclass:
return ExtracellularPropertiesLocal.subclass(*args_, **kwargs_)
else:
return ExtracellularPropertiesLocal(*args_, **kwargs_)
factory = staticmethod(factory)
def get_species(self): return self.species
def set_species(self, species): self.species = species
def add_species(self, value): self.species.append(value)
def insert_species(self, index, value): self.species[index] = value
def get_temperature(self): return self.temperature
def set_temperature(self, temperature): self.temperature = temperature
def validate_Nml2Quantity_temperature(self, value):
# Validate type Nml2Quantity_temperature, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.species
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ExtracellularPropertiesLocal', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ExtracellularPropertiesLocal')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExtracellularPropertiesLocal'):
if self.temperature is not None and 'temperature' not in already_processed:
already_processed.add('temperature')
outfile.write(' temperature=%s' % (quote_attrib(self.temperature), ))
def exportChildren(self, outfile, level, namespace_='', name_='ExtracellularPropertiesLocal', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for species_ in self.species:
species_.export(outfile, level, namespace_, name_='species', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ExtracellularPropertiesLocal'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.temperature is not None and 'temperature' not in already_processed:
already_processed.add('temperature')
showIndent(outfile, level)
outfile.write('temperature="%s",\n' % (self.temperature,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('species=[\n')
level += 1
for species_ in self.species:
showIndent(outfile, level)
outfile.write('model_.Species(\n')
species_.exportLiteral(outfile, level, name_='Species')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('temperature', node)
if value is not None and 'temperature' not in already_processed:
already_processed.add('temperature')
self.temperature = value
self.validate_Nml2Quantity_temperature(self.temperature) # validate type Nml2Quantity_temperature
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'species':
obj_ = Species.factory()
obj_.build(child_)
self.species.append(obj_)
# end class ExtracellularPropertiesLocal
class SpaceStructure(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ySpacing=None, zStart=0, yStart=0, zSpacing=None, xStart=0, xSpacing=None):
self.ySpacing = _cast(float, ySpacing)
self.zStart = _cast(float, zStart)
self.yStart = _cast(float, yStart)
self.zSpacing = _cast(float, zSpacing)
self.xStart = _cast(float, xStart)
self.xSpacing = _cast(float, xSpacing)
pass
def factory(*args_, **kwargs_):
if SpaceStructure.subclass:
return SpaceStructure.subclass(*args_, **kwargs_)
else:
return SpaceStructure(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ySpacing(self): return self.ySpacing
def set_ySpacing(self, ySpacing): self.ySpacing = ySpacing
def get_zStart(self): return self.zStart
def set_zStart(self, zStart): self.zStart = zStart
def get_yStart(self): return self.yStart
def set_yStart(self, yStart): self.yStart = yStart
def get_zSpacing(self): return self.zSpacing
def set_zSpacing(self, zSpacing): self.zSpacing = zSpacing
def get_xStart(self): return self.xStart
def set_xStart(self, xStart): self.xStart = xStart
def get_xSpacing(self): return self.xSpacing
def set_xSpacing(self, xSpacing): self.xSpacing = xSpacing
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SpaceStructure', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SpaceStructure')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SpaceStructure'):
if self.ySpacing is not None and 'ySpacing' not in already_processed:
already_processed.add('ySpacing')
outfile.write(' ySpacing="%s"' % self.gds_format_float(self.ySpacing, input_name='ySpacing'))
if self.zStart is not None and 'zStart' not in already_processed:
already_processed.add('zStart')
outfile.write(' zStart="%s"' % self.gds_format_float(self.zStart, input_name='zStart'))
if self.yStart is not None and 'yStart' not in already_processed:
already_processed.add('yStart')
outfile.write(' yStart="%s"' % self.gds_format_float(self.yStart, input_name='yStart'))
if self.zSpacing is not None and 'zSpacing' not in already_processed:
already_processed.add('zSpacing')
outfile.write(' zSpacing="%s"' % self.gds_format_float(self.zSpacing, input_name='zSpacing'))
if self.xStart is not None and 'xStart' not in already_processed:
already_processed.add('xStart')
outfile.write(' xStart="%s"' % self.gds_format_float(self.xStart, input_name='xStart'))
if self.xSpacing is not None and 'xSpacing' not in already_processed:
already_processed.add('xSpacing')
outfile.write(' xSpacing="%s"' % self.gds_format_float(self.xSpacing, input_name='xSpacing'))
def exportChildren(self, outfile, level, namespace_='', name_='SpaceStructure', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='SpaceStructure'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.ySpacing is not None and 'ySpacing' not in already_processed:
already_processed.add('ySpacing')
showIndent(outfile, level)
outfile.write('ySpacing=%f,\n' % (self.ySpacing,))
if self.zStart is not None and 'zStart' not in already_processed:
already_processed.add('zStart')
showIndent(outfile, level)
outfile.write('zStart=%f,\n' % (self.zStart,))
if self.yStart is not None and 'yStart' not in already_processed:
already_processed.add('yStart')
showIndent(outfile, level)
outfile.write('yStart=%f,\n' % (self.yStart,))
if self.zSpacing is not None and 'zSpacing' not in already_processed:
already_processed.add('zSpacing')
showIndent(outfile, level)
outfile.write('zSpacing=%f,\n' % (self.zSpacing,))
if self.xStart is not None and 'xStart' not in already_processed:
already_processed.add('xStart')
showIndent(outfile, level)
outfile.write('xStart=%f,\n' % (self.xStart,))
if self.xSpacing is not None and 'xSpacing' not in already_processed:
already_processed.add('xSpacing')
showIndent(outfile, level)
outfile.write('xSpacing=%f,\n' % (self.xSpacing,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('ySpacing', node)
if value is not None and 'ySpacing' not in already_processed:
already_processed.add('ySpacing')
try:
self.ySpacing = float(value)
            except ValueError as exp:
raise ValueError('Bad float/double attribute (ySpacing): %s' % exp)
value = find_attr_value_('zStart', node)
if value is not None and 'zStart' not in already_processed:
already_processed.add('zStart')
try:
self.zStart = float(value)
            except ValueError as exp:
raise ValueError('Bad float/double attribute (zStart): %s' % exp)
value = find_attr_value_('yStart', node)
if value is not None and 'yStart' not in already_processed:
already_processed.add('yStart')
try:
self.yStart = float(value)
            except ValueError as exp:
raise ValueError('Bad float/double attribute (yStart): %s' % exp)
value = find_attr_value_('zSpacing', node)
if value is not None and 'zSpacing' not in already_processed:
already_processed.add('zSpacing')
try:
self.zSpacing = float(value)
            except ValueError as exp:
raise ValueError('Bad float/double attribute (zSpacing): %s' % exp)
value = find_attr_value_('xStart', node)
if value is not None and 'xStart' not in already_processed:
already_processed.add('xStart')
try:
self.xStart = float(value)
            except ValueError as exp:
raise ValueError('Bad float/double attribute (xStart): %s' % exp)
value = find_attr_value_('xSpacing', node)
if value is not None and 'xSpacing' not in already_processed:
already_processed.add('xSpacing')
try:
self.xSpacing = float(value)
            except ValueError as exp:
raise ValueError('Bad float/double attribute (xSpacing): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class SpaceStructure
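# Hedged usage sketch (illustrative only, not part of the generated bindings):
# SpaceStructure is purely numeric: the *Start attributes default to 0, the
# *Spacing attributes give the step along each axis, and all six values are
# cast to float and written out through gds_format_float().  The spacing
# values below are arbitrary.
def _example_space_structure():  # never called by the generated code
    import sys
    space = SpaceStructure.factory(xSpacing=10.0, ySpacing=10.0, zSpacing=10.0)
    space.export(sys.stdout, 0, name_='structure')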
class Layout(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, space=None, random=None, grid=None, unstructured=None):
self.space = _cast(None, space)
self.random = random
self.grid = grid
self.unstructured = unstructured
def factory(*args_, **kwargs_):
if Layout.subclass:
return Layout.subclass(*args_, **kwargs_)
else:
return Layout(*args_, **kwargs_)
factory = staticmethod(factory)
def get_random(self): return self.random
def set_random(self, random): self.random = random
def get_grid(self): return self.grid
def set_grid(self, grid): self.grid = grid
def get_unstructured(self): return self.unstructured
def set_unstructured(self, unstructured): self.unstructured = unstructured
def get_space(self): return self.space
def set_space(self, space): self.space = space
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.random is not None or
self.grid is not None or
self.unstructured is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Layout', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Layout')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Layout'):
if self.space is not None and 'space' not in already_processed:
already_processed.add('space')
outfile.write(' space=%s' % (quote_attrib(self.space), ))
def exportChildren(self, outfile, level, namespace_='', name_='Layout', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.random is not None:
self.random.export(outfile, level, namespace_, name_='random', pretty_print=pretty_print)
if self.grid is not None:
self.grid.export(outfile, level, namespace_, name_='grid', pretty_print=pretty_print)
if self.unstructured is not None:
self.unstructured.export(outfile, level, namespace_, name_='unstructured', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Layout'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.space is not None and 'space' not in already_processed:
already_processed.add('space')
showIndent(outfile, level)
outfile.write('space="%s",\n' % (self.space,))
def exportLiteralChildren(self, outfile, level, name_):
if self.random is not None:
showIndent(outfile, level)
outfile.write('random=model_.RandomLayout(\n')
self.random.exportLiteral(outfile, level, name_='random')
showIndent(outfile, level)
outfile.write('),\n')
if self.grid is not None:
showIndent(outfile, level)
outfile.write('grid=model_.GridLayout(\n')
self.grid.exportLiteral(outfile, level, name_='grid')
showIndent(outfile, level)
outfile.write('),\n')
if self.unstructured is not None:
showIndent(outfile, level)
outfile.write('unstructured=model_.UnstructuredLayout(\n')
self.unstructured.exportLiteral(outfile, level, name_='unstructured')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('space', node)
if value is not None and 'space' not in already_processed:
already_processed.add('space')
self.space = value
self.validate_NmlId(self.space) # validate type NmlId
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'random':
obj_ = RandomLayout.factory()
obj_.build(child_)
self.set_random(obj_)
elif nodeName_ == 'grid':
obj_ = GridLayout.factory()
obj_.build(child_)
self.set_grid(obj_)
elif nodeName_ == 'unstructured':
obj_ = UnstructuredLayout.factory()
obj_.build(child_)
self.set_unstructured(obj_)
# end class Layout
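# Hedged usage sketch (illustrative only, not part of the generated bindings):
# Layout is meant to carry one of three alternative children (random, grid or
# unstructured, presumably a choice in the schema); exportChildren() writes
# whichever of the three is non-None.  The region id, count and grid sizes
# below are invented placeholders, and RandomLayout/GridLayout are defined
# just below (the names are resolved when the function is called).
def _example_layout():  # never called by the generated code
    import sys
    layout = Layout.factory()
    layout.set_random(RandomLayout.factory(region='region1', number=50))
    # A grid variant would instead use:
    #     layout.set_grid(GridLayout.factory(xSize=5, ySize=5, zSize=2))
    layout.export(sys.stdout, 0, name_='layout')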
class UnstructuredLayout(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, number=None):
self.number = _cast(int, number)
pass
def factory(*args_, **kwargs_):
if UnstructuredLayout.subclass:
return UnstructuredLayout.subclass(*args_, **kwargs_)
else:
return UnstructuredLayout(*args_, **kwargs_)
factory = staticmethod(factory)
def get_number(self): return self.number
def set_number(self, number): self.number = number
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='UnstructuredLayout', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='UnstructuredLayout')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='UnstructuredLayout'):
if self.number is not None and 'number' not in already_processed:
already_processed.add('number')
outfile.write(' number="%s"' % self.gds_format_integer(self.number, input_name='number'))
def exportChildren(self, outfile, level, namespace_='', name_='UnstructuredLayout', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='UnstructuredLayout'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.number is not None and 'number' not in already_processed:
already_processed.add('number')
showIndent(outfile, level)
outfile.write('number=%d,\n' % (self.number,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('number', node)
if value is not None and 'number' not in already_processed:
already_processed.add('number')
try:
self.number = int(value)
            except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.number < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class UnstructuredLayout
class RandomLayout(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, region=None, number=None):
self.region = _cast(None, region)
self.number = _cast(int, number)
pass
def factory(*args_, **kwargs_):
if RandomLayout.subclass:
return RandomLayout.subclass(*args_, **kwargs_)
else:
return RandomLayout(*args_, **kwargs_)
factory = staticmethod(factory)
def get_region(self): return self.region
def set_region(self, region): self.region = region
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_number(self): return self.number
def set_number(self, number): self.number = number
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='RandomLayout', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RandomLayout')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RandomLayout'):
if self.region is not None and 'region' not in already_processed:
already_processed.add('region')
outfile.write(' region=%s' % (quote_attrib(self.region), ))
if self.number is not None and 'number' not in already_processed:
already_processed.add('number')
outfile.write(' number="%s"' % self.gds_format_integer(self.number, input_name='number'))
def exportChildren(self, outfile, level, namespace_='', name_='RandomLayout', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='RandomLayout'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.region is not None and 'region' not in already_processed:
already_processed.add('region')
showIndent(outfile, level)
outfile.write('region="%s",\n' % (self.region,))
if self.number is not None and 'number' not in already_processed:
already_processed.add('number')
showIndent(outfile, level)
outfile.write('number=%d,\n' % (self.number,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('region', node)
if value is not None and 'region' not in already_processed:
already_processed.add('region')
self.region = value
self.validate_NmlId(self.region) # validate type NmlId
value = find_attr_value_('number', node)
if value is not None and 'number' not in already_processed:
already_processed.add('number')
try:
self.number = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.number < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class RandomLayout
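# Illustrative usage sketch (hand-written, not schema-generated): building a
# RandomLayout in code and serialising it with export(). Relies on the
# module-level helpers (showIndent, quote_attrib, ...) defined earlier in this
# file; the region id and count below are example values only.
def _example_random_layout_export():
    import sys
    layout = RandomLayout(number=100, region='region1')
    layout.export(sys.stdout, 0, name_='randomLayout')
    # writes approximately: <randomLayout region="region1" number="100"/>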
class GridLayout(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, zSize=None, ySize=None, xSize=None):
self.zSize = _cast(int, zSize)
self.ySize = _cast(int, ySize)
self.xSize = _cast(int, xSize)
pass
def factory(*args_, **kwargs_):
if GridLayout.subclass:
return GridLayout.subclass(*args_, **kwargs_)
else:
return GridLayout(*args_, **kwargs_)
factory = staticmethod(factory)
def get_zSize(self): return self.zSize
def set_zSize(self, zSize): self.zSize = zSize
def get_ySize(self): return self.ySize
def set_ySize(self, ySize): self.ySize = ySize
def get_xSize(self): return self.xSize
def set_xSize(self, xSize): self.xSize = xSize
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='GridLayout', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='GridLayout')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='GridLayout'):
if self.zSize is not None and 'zSize' not in already_processed:
already_processed.add('zSize')
outfile.write(' zSize="%s"' % self.gds_format_integer(self.zSize, input_name='zSize'))
if self.ySize is not None and 'ySize' not in already_processed:
already_processed.add('ySize')
outfile.write(' ySize="%s"' % self.gds_format_integer(self.ySize, input_name='ySize'))
if self.xSize is not None and 'xSize' not in already_processed:
already_processed.add('xSize')
outfile.write(' xSize="%s"' % self.gds_format_integer(self.xSize, input_name='xSize'))
def exportChildren(self, outfile, level, namespace_='', name_='GridLayout', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='GridLayout'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.zSize is not None and 'zSize' not in already_processed:
already_processed.add('zSize')
showIndent(outfile, level)
outfile.write('zSize=%d,\n' % (self.zSize,))
if self.ySize is not None and 'ySize' not in already_processed:
already_processed.add('ySize')
showIndent(outfile, level)
outfile.write('ySize=%d,\n' % (self.ySize,))
if self.xSize is not None and 'xSize' not in already_processed:
already_processed.add('xSize')
showIndent(outfile, level)
outfile.write('xSize=%d,\n' % (self.xSize,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('zSize', node)
if value is not None and 'zSize' not in already_processed:
already_processed.add('zSize')
try:
self.zSize = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.zSize < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('ySize', node)
if value is not None and 'ySize' not in already_processed:
already_processed.add('ySize')
try:
self.ySize = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.ySize < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('xSize', node)
if value is not None and 'xSize' not in already_processed:
already_processed.add('xSize')
try:
self.xSize = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.xSize < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class GridLayout
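# Illustrative usage sketch (hand-written, not schema-generated): a GridLayout
# carries only the three integer grid dimensions, so export() emits a single
# self-closing element. The sizes below are example values only.
def _example_grid_layout_export():
    import sys
    layout = GridLayout(xSize=3, ySize=3, zSize=1)
    layout.export(sys.stdout, 0, name_='gridLayout')
    # writes approximately: <gridLayout zSize="1" ySize="3" xSize="3"/>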
class Instance(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, i=None, k=None, j=None, id=None, location=None):
self.i = _cast(int, i)
self.k = _cast(int, k)
self.j = _cast(int, j)
self.id = _cast(int, id)
self.location = location
def factory(*args_, **kwargs_):
if Instance.subclass:
return Instance.subclass(*args_, **kwargs_)
else:
return Instance(*args_, **kwargs_)
factory = staticmethod(factory)
def get_location(self): return self.location
def set_location(self, location): self.location = location
def get_i(self): return self.i
def set_i(self, i): self.i = i
def get_k(self): return self.k
def set_k(self, k): self.k = k
def get_j(self): return self.j
def set_j(self, j): self.j = j
def get_id(self): return self.id
def set_id(self, id): self.id = id
def hasContent_(self):
if (
self.location is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Instance', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Instance')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Instance'):
if self.i is not None and 'i' not in already_processed:
already_processed.add('i')
outfile.write(' i="%s"' % self.gds_format_integer(self.i, input_name='i'))
if self.k is not None and 'k' not in already_processed:
already_processed.add('k')
outfile.write(' k="%s"' % self.gds_format_integer(self.k, input_name='k'))
if self.j is not None and 'j' not in already_processed:
already_processed.add('j')
outfile.write(' j="%s"' % self.gds_format_integer(self.j, input_name='j'))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='Instance', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.location is not None:
self.location.export(outfile, level, namespace_, name_='location', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Instance'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.i is not None and 'i' not in already_processed:
already_processed.add('i')
showIndent(outfile, level)
outfile.write('i=%d,\n' % (self.i,))
if self.k is not None and 'k' not in already_processed:
already_processed.add('k')
showIndent(outfile, level)
outfile.write('k=%d,\n' % (self.k,))
if self.j is not None and 'j' not in already_processed:
already_processed.add('j')
showIndent(outfile, level)
outfile.write('j=%d,\n' % (self.j,))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
showIndent(outfile, level)
outfile.write('id=%d,\n' % (self.id,))
def exportLiteralChildren(self, outfile, level, name_):
if self.location is not None:
showIndent(outfile, level)
outfile.write('location=model_.Location(\n')
self.location.exportLiteral(outfile, level, name_='location')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('i', node)
if value is not None and 'i' not in already_processed:
already_processed.add('i')
try:
self.i = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.i < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('k', node)
if value is not None and 'k' not in already_processed:
already_processed.add('k')
try:
self.k = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.k < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('j', node)
if value is not None and 'j' not in already_processed:
already_processed.add('j')
try:
self.j = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.j < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'location':
obj_ = Location.factory()
obj_.build(child_)
self.set_location(obj_)
# end class Instance
class Location(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, y=None, x=None, z=None):
self.y = _cast(float, y)
self.x = _cast(float, x)
self.z = _cast(float, z)
pass
def factory(*args_, **kwargs_):
if Location.subclass:
return Location.subclass(*args_, **kwargs_)
else:
return Location(*args_, **kwargs_)
factory = staticmethod(factory)
def get_y(self): return self.y
def set_y(self, y): self.y = y
def get_x(self): return self.x
def set_x(self, x): self.x = x
def get_z(self): return self.z
def set_z(self, z): self.z = z
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Location', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Location')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Location'):
if self.y is not None and 'y' not in already_processed:
already_processed.add('y')
outfile.write(' y="%s"' % self.gds_format_float(self.y, input_name='y'))
if self.x is not None and 'x' not in already_processed:
already_processed.add('x')
outfile.write(' x="%s"' % self.gds_format_float(self.x, input_name='x'))
if self.z is not None and 'z' not in already_processed:
already_processed.add('z')
outfile.write(' z="%s"' % self.gds_format_float(self.z, input_name='z'))
def exportChildren(self, outfile, level, namespace_='', name_='Location', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='Location'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.y is not None and 'y' not in already_processed:
already_processed.add('y')
showIndent(outfile, level)
outfile.write('y=%f,\n' % (self.y,))
if self.x is not None and 'x' not in already_processed:
already_processed.add('x')
showIndent(outfile, level)
outfile.write('x=%f,\n' % (self.x,))
if self.z is not None and 'z' not in already_processed:
already_processed.add('z')
showIndent(outfile, level)
outfile.write('z=%f,\n' % (self.z,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('y', node)
if value is not None and 'y' not in already_processed:
already_processed.add('y')
try:
self.y = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (y): %s' % exp)
value = find_attr_value_('x', node)
if value is not None and 'x' not in already_processed:
already_processed.add('x')
try:
self.x = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (x): %s' % exp)
value = find_attr_value_('z', node)
if value is not None and 'z' not in already_processed:
already_processed.add('z')
try:
self.z = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (z): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Location
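# Illustrative usage sketch (hand-written, not schema-generated): an Instance
# holds a single Location child, so hasContent_() is true and export() emits
# an opening tag, the nested <location/>, and a closing tag. The id and
# coordinates below are example values only.
def _example_instance_with_location():
    import sys
    inst = Instance(id=0)
    inst.set_location(Location(x=1.5, y=0.0, z=-2.25))
    inst.export(sys.stdout, 0, name_='instance')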
class SynapticConnection(GeneratedsSuper):
"""Single explicit connection. Introduced to test connections in LEMS.
Will probably be removed in favour of connections wrapped in
projection element"""
subclass = None
superclass = None
def __init__(self, to=None, synapse=None, fromxx=None):
self.to = _cast(None, to)
self.synapse = _cast(None, synapse)
self.fromxx = _cast(None, fromxx)
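        # 'fromxx' corresponds to the XML attribute 'from'; the binding
        # renames it because 'from' is a reserved word in Python.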
pass
def factory(*args_, **kwargs_):
if SynapticConnection.subclass:
return SynapticConnection.subclass(*args_, **kwargs_)
else:
return SynapticConnection(*args_, **kwargs_)
factory = staticmethod(factory)
def get_to(self): return self.to
def set_to(self, to): self.to = to
def get_synapse(self): return self.synapse
def set_synapse(self, synapse): self.synapse = synapse
def get_from(self): return self.fromxx
def set_from(self, fromxx): self.fromxx = fromxx
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SynapticConnection', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SynapticConnection')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SynapticConnection'):
if self.to is not None and 'to' not in already_processed:
already_processed.add('to')
outfile.write(' to=%s' % (self.gds_format_string(quote_attrib(self.to).encode(ExternalEncoding), input_name='to'), ))
if self.synapse is not None and 'synapse' not in already_processed:
already_processed.add('synapse')
outfile.write(' synapse=%s' % (self.gds_format_string(quote_attrib(self.synapse).encode(ExternalEncoding), input_name='synapse'), ))
if self.fromxx is not None and 'fromxx' not in already_processed:
already_processed.add('fromxx')
outfile.write(' from=%s' % (self.gds_format_string(quote_attrib(self.fromxx).encode(ExternalEncoding), input_name='from'), ))
def exportChildren(self, outfile, level, namespace_='', name_='SynapticConnection', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='SynapticConnection'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.to is not None and 'to' not in already_processed:
already_processed.add('to')
showIndent(outfile, level)
outfile.write('to="%s",\n' % (self.to,))
if self.synapse is not None and 'synapse' not in already_processed:
already_processed.add('synapse')
showIndent(outfile, level)
outfile.write('synapse="%s",\n' % (self.synapse,))
if self.fromxx is not None and 'fromxx' not in already_processed:
already_processed.add('fromxx')
showIndent(outfile, level)
outfile.write('fromxx="%s",\n' % (self.fromxx,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('to', node)
if value is not None and 'to' not in already_processed:
already_processed.add('to')
self.to = value
value = find_attr_value_('synapse', node)
if value is not None and 'synapse' not in already_processed:
already_processed.add('synapse')
self.synapse = value
value = find_attr_value_('from', node)
if value is not None and 'from' not in already_processed:
already_processed.add('from')
self.fromxx = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class SynapticConnection
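# Illustrative usage sketch (hand-written, not schema-generated): note that the
# constructor and accessors use 'fromxx' while the serialised XML attribute is
# 'from'. The cell paths and synapse id are example values; export() relies on
# module-level helpers (quote_attrib, ExternalEncoding) defined earlier.
def _example_synaptic_connection_export():
    import sys
    conn = SynapticConnection(fromxx='../pop0/0/cell', to='../pop1/0/cell',
                              synapse='syn1')
    conn.export(sys.stdout, 0, name_='synapticConnection')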
class Connection(GeneratedsSuper):
"""Subject to change as it gets tested with LEMS"""
subclass = None
superclass = None
def __init__(self, postCellId=None, id=None, preCellId=None):
self.postCellId = _cast(None, postCellId)
self.id = _cast(int, id)
self.preCellId = _cast(None, preCellId)
pass
def factory(*args_, **kwargs_):
if Connection.subclass:
return Connection.subclass(*args_, **kwargs_)
else:
return Connection(*args_, **kwargs_)
factory = staticmethod(factory)
def get_postCellId(self): return self.postCellId
def set_postCellId(self, postCellId): self.postCellId = postCellId
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_preCellId(self): return self.preCellId
def set_preCellId(self, preCellId): self.preCellId = preCellId
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Connection', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Connection')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Connection'):
if self.postCellId is not None and 'postCellId' not in already_processed:
already_processed.add('postCellId')
outfile.write(' postCellId=%s' % (self.gds_format_string(quote_attrib(self.postCellId).encode(ExternalEncoding), input_name='postCellId'), ))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
if self.preCellId is not None and 'preCellId' not in already_processed:
already_processed.add('preCellId')
outfile.write(' preCellId=%s' % (self.gds_format_string(quote_attrib(self.preCellId).encode(ExternalEncoding), input_name='preCellId'), ))
def exportChildren(self, outfile, level, namespace_='', name_='Connection', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='Connection'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.postCellId is not None and 'postCellId' not in already_processed:
already_processed.add('postCellId')
showIndent(outfile, level)
outfile.write('postCellId="%s",\n' % (self.postCellId,))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
showIndent(outfile, level)
outfile.write('id=%d,\n' % (self.id,))
if self.preCellId is not None and 'preCellId' not in already_processed:
already_processed.add('preCellId')
showIndent(outfile, level)
outfile.write('preCellId="%s",\n' % (self.preCellId,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('postCellId', node)
if value is not None and 'postCellId' not in already_processed:
already_processed.add('postCellId')
self.postCellId = value
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('preCellId', node)
if value is not None and 'preCellId' not in already_processed:
already_processed.add('preCellId')
self.preCellId = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Connection
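# Illustrative usage sketch (hand-written, not schema-generated): building a
# Connection from a parsed <connection> element via build(). A plain
# ElementTree element is used here for brevity; the generated parse() helpers
# elsewhere in this file use the module's own etree import. Cell paths are
# example values only.
def _example_connection_build():
    from xml.etree import ElementTree as ET
    node = ET.fromstring(
        '<connection id="0" preCellId="../pop0/0/cell" postCellId="../pop1/0/cell"/>')
    conn = Connection.factory()
    conn.build(node)
    return conn.get_id(), conn.get_preCellId(), conn.get_postCellId()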
class ExplicitInput(GeneratedsSuper):
"""Single explicit input. Introduced to test inputs in LEMS. Will
probably be removed in favour of inputs wrapped in inputList
element"""
subclass = None
superclass = None
def __init__(self, input=None, destination=None, target=None):
self.input = _cast(None, input)
self.destination = _cast(None, destination)
self.target = _cast(None, target)
pass
def factory(*args_, **kwargs_):
if ExplicitInput.subclass:
return ExplicitInput.subclass(*args_, **kwargs_)
else:
return ExplicitInput(*args_, **kwargs_)
factory = staticmethod(factory)
def get_input(self): return self.input
def set_input(self, input): self.input = input
def get_destination(self): return self.destination
def set_destination(self, destination): self.destination = destination
def get_target(self): return self.target
def set_target(self, target): self.target = target
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ExplicitInput', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ExplicitInput')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExplicitInput'):
if self.input is not None and 'input' not in already_processed:
already_processed.add('input')
outfile.write(' input=%s' % (self.gds_format_string(quote_attrib(self.input).encode(ExternalEncoding), input_name='input'), ))
if self.destination is not None and 'destination' not in already_processed:
already_processed.add('destination')
outfile.write(' destination=%s' % (self.gds_format_string(quote_attrib(self.destination).encode(ExternalEncoding), input_name='destination'), ))
if self.target is not None and 'target' not in already_processed:
already_processed.add('target')
outfile.write(' target=%s' % (self.gds_format_string(quote_attrib(self.target).encode(ExternalEncoding), input_name='target'), ))
def exportChildren(self, outfile, level, namespace_='', name_='ExplicitInput', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='ExplicitInput'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.input is not None and 'input' not in already_processed:
already_processed.add('input')
showIndent(outfile, level)
outfile.write('input="%s",\n' % (self.input,))
if self.destination is not None and 'destination' not in already_processed:
already_processed.add('destination')
showIndent(outfile, level)
outfile.write('destination="%s",\n' % (self.destination,))
if self.target is not None and 'target' not in already_processed:
already_processed.add('target')
showIndent(outfile, level)
outfile.write('target="%s",\n' % (self.target,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('input', node)
if value is not None and 'input' not in already_processed:
already_processed.add('input')
self.input = value
value = find_attr_value_('destination', node)
if value is not None and 'destination' not in already_processed:
already_processed.add('destination')
self.destination = value
value = find_attr_value_('target', node)
if value is not None and 'target' not in already_processed:
already_processed.add('target')
self.target = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ExplicitInput
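# Illustrative usage sketch (hand-written, not schema-generated): ExplicitInput
# carries only three string attributes, so export() emits a self-closing
# element. The target/input/destination values are assumptions for the example.
def _example_explicit_input_export():
    import sys
    ei = ExplicitInput(target='pop0[0]', input='pulseGen1', destination='synapses')
    ei.export(sys.stdout, 0, name_='explicitInput')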
class Input(GeneratedsSuper):
"""Subject to change as it gets tested with LEMS"""
subclass = None
superclass = None
def __init__(self, destination=None, id=None, target=None):
self.destination = _cast(None, destination)
self.id = _cast(int, id)
self.target = _cast(None, target)
pass
def factory(*args_, **kwargs_):
if Input.subclass:
return Input.subclass(*args_, **kwargs_)
else:
return Input(*args_, **kwargs_)
factory = staticmethod(factory)
def get_destination(self): return self.destination
def set_destination(self, destination): self.destination = destination
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_target(self): return self.target
def set_target(self, target): self.target = target
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Input', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Input')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Input'):
if self.destination is not None and 'destination' not in already_processed:
already_processed.add('destination')
outfile.write(' destination=%s' % (quote_attrib(self.destination), ))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
if self.target is not None and 'target' not in already_processed:
already_processed.add('target')
outfile.write(' target=%s' % (self.gds_format_string(quote_attrib(self.target).encode(ExternalEncoding), input_name='target'), ))
def exportChildren(self, outfile, level, namespace_='', name_='Input', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='Input'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.destination is not None and 'destination' not in already_processed:
already_processed.add('destination')
showIndent(outfile, level)
outfile.write('destination="%s",\n' % (self.destination,))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
showIndent(outfile, level)
outfile.write('id=%d,\n' % (self.id,))
if self.target is not None and 'target' not in already_processed:
already_processed.add('target')
showIndent(outfile, level)
outfile.write('target="%s",\n' % (self.target,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('destination', node)
if value is not None and 'destination' not in already_processed:
already_processed.add('destination')
self.destination = value
self.validate_NmlId(self.destination) # validate type NmlId
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('target', node)
if value is not None and 'target' not in already_processed:
already_processed.add('target')
self.target = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Input
class Base(GeneratedsSuper):
"""Anything which can have a unique id (within its parent) i.e. most
elements."""
subclass = None
superclass = None
def __init__(self, id=None, neuroLexId=None, extensiontype_=None):
self.id = _cast(None, id)
self.neuroLexId = _cast(None, neuroLexId)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if Base.subclass:
return Base.subclass(*args_, **kwargs_)
else:
return Base(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_neuroLexId(self): return self.neuroLexId
def set_neuroLexId(self, neuroLexId): self.neuroLexId = neuroLexId
def validate_NeuroLexId(self, value):
# Validate type NeuroLexId, a restriction on xs:string.
pass
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Base', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Base')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Base'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (quote_attrib(self.id), ))
if self.neuroLexId is not None and 'neuroLexId' not in already_processed:
already_processed.add('neuroLexId')
outfile.write(' neuroLexId=%s' % (quote_attrib(self.neuroLexId), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='Base', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='Base'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
showIndent(outfile, level)
outfile.write('id="%s",\n' % (self.id,))
if self.neuroLexId is not None and 'neuroLexId' not in already_processed:
already_processed.add('neuroLexId')
showIndent(outfile, level)
outfile.write('neuroLexId="%s",\n' % (self.neuroLexId,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
self.validate_NmlId(self.id) # validate type NmlId
value = find_attr_value_('neuroLexId', node)
if value is not None and 'neuroLexId' not in already_processed:
already_processed.add('neuroLexId')
self.neuroLexId = value
self.validate_NeuroLexId(self.neuroLexId) # validate type NeuroLexId
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Base
class Standalone(Base):
"""Elements which can stand alone and be referenced by id, e.g. cell,
morphology.Optional human readable name of the element. Not
necessarily unique; should not be used for referencing/indexing."""
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, extensiontype_=None):
super(Standalone, self).__init__(id, neuroLexId, extensiontype_, )
self.name = _cast(None, name)
self.metaid = _cast(None, metaid)
self.notes = notes
self.annotation = annotation
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if Standalone.subclass:
return Standalone.subclass(*args_, **kwargs_)
else:
return Standalone(*args_, **kwargs_)
factory = staticmethod(factory)
def get_notes(self): return self.notes
def set_notes(self, notes): self.notes = notes
def validate_Notes(self, value):
# Validate type Notes, a restriction on xs:string.
pass
def get_annotation(self): return self.annotation
def set_annotation(self, annotation): self.annotation = annotation
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_metaid(self): return self.metaid
def set_metaid(self, metaid): self.metaid = metaid
def validate_MetaId(self, value):
# Validate type MetaId, a restriction on xs:string.
pass
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
self.notes is not None or
self.annotation is not None or
super(Standalone, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Standalone', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Standalone')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Standalone'):
super(Standalone, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Standalone')
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.metaid is not None and 'metaid' not in already_processed:
already_processed.add('metaid')
outfile.write(' metaid=%s' % (quote_attrib(self.metaid), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='Standalone', fromsubclass_=False, pretty_print=True):
super(Standalone, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.notes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snotes>%s</%snotes>%s' % (namespace_, self.gds_format_string(quote_xml(self.notes).encode(ExternalEncoding), input_name='notes'), namespace_, eol_))
if self.annotation is not None:
self.annotation.export(outfile, level, namespace_, name_='annotation', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Standalone'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
if self.metaid is not None and 'metaid' not in already_processed:
already_processed.add('metaid')
showIndent(outfile, level)
outfile.write('metaid="%s",\n' % (self.metaid,))
super(Standalone, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Standalone, self).exportLiteralChildren(outfile, level, name_)
if self.notes is not None:
showIndent(outfile, level)
outfile.write('notes=%s,\n' % quote_python(self.notes).encode(ExternalEncoding))
if self.annotation is not None:
showIndent(outfile, level)
outfile.write('annotation=model_.Annotation(\n')
self.annotation.exportLiteral(outfile, level, name_='annotation')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('metaid', node)
if value is not None and 'metaid' not in already_processed:
already_processed.add('metaid')
self.metaid = value
self.validate_MetaId(self.metaid) # validate type MetaId
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(Standalone, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'notes':
notes_ = child_.text
notes_ = self.gds_validate_string(notes_, node, 'notes')
self.notes = notes_
self.validate_Notes(self.notes) # validate type Notes
elif nodeName_ == 'annotation':
obj_ = Annotation.factory()
obj_.build(child_)
self.set_annotation(obj_)
super(Standalone, self).buildChildren(child_, node, nodeName_, True)
# end class Standalone
class SpikeSourcePoisson(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, duration=None, start=None, rate=None):
super(SpikeSourcePoisson, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.duration = _cast(None, duration)
self.start = _cast(None, start)
self.rate = _cast(None, rate)
pass
def factory(*args_, **kwargs_):
if SpikeSourcePoisson.subclass:
return SpikeSourcePoisson.subclass(*args_, **kwargs_)
else:
return SpikeSourcePoisson(*args_, **kwargs_)
factory = staticmethod(factory)
def get_duration(self): return self.duration
def set_duration(self, duration): self.duration = duration
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_start(self): return self.start
def set_start(self, start): self.start = start
def get_rate(self): return self.rate
def set_rate(self, rate): self.rate = rate
def validate_Nml2Quantity_pertime(self, value):
# Validate type Nml2Quantity_pertime, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(SpikeSourcePoisson, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SpikeSourcePoisson', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SpikeSourcePoisson')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SpikeSourcePoisson'):
super(SpikeSourcePoisson, self).exportAttributes(outfile, level, already_processed, namespace_, name_='SpikeSourcePoisson')
if self.duration is not None and 'duration' not in already_processed:
already_processed.add('duration')
outfile.write(' duration=%s' % (quote_attrib(self.duration), ))
if self.start is not None and 'start' not in already_processed:
already_processed.add('start')
outfile.write(' start=%s' % (quote_attrib(self.start), ))
if self.rate is not None and 'rate' not in already_processed:
already_processed.add('rate')
outfile.write(' rate=%s' % (quote_attrib(self.rate), ))
def exportChildren(self, outfile, level, namespace_='', name_='SpikeSourcePoisson', fromsubclass_=False, pretty_print=True):
super(SpikeSourcePoisson, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='SpikeSourcePoisson'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.duration is not None and 'duration' not in already_processed:
already_processed.add('duration')
showIndent(outfile, level)
outfile.write('duration="%s",\n' % (self.duration,))
if self.start is not None and 'start' not in already_processed:
already_processed.add('start')
showIndent(outfile, level)
outfile.write('start="%s",\n' % (self.start,))
if self.rate is not None and 'rate' not in already_processed:
already_processed.add('rate')
showIndent(outfile, level)
outfile.write('rate="%s",\n' % (self.rate,))
super(SpikeSourcePoisson, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(SpikeSourcePoisson, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('duration', node)
if value is not None and 'duration' not in already_processed:
already_processed.add('duration')
self.duration = value
self.validate_Nml2Quantity_time(self.duration) # validate type Nml2Quantity_time
value = find_attr_value_('start', node)
if value is not None and 'start' not in already_processed:
already_processed.add('start')
self.start = value
self.validate_Nml2Quantity_time(self.start) # validate type Nml2Quantity_time
value = find_attr_value_('rate', node)
if value is not None and 'rate' not in already_processed:
already_processed.add('rate')
self.rate = value
self.validate_Nml2Quantity_pertime(self.rate) # validate type Nml2Quantity_pertime
super(SpikeSourcePoisson, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SpikeSourcePoisson, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SpikeSourcePoisson
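# Illustrative usage sketch (hand-written, not schema-generated):
# SpikeSourcePoisson inherits id/name/notes handling from Standalone. The
# rate/start/duration strings are NeuroML quantity literals and are example
# values only.
def _example_spike_source_poisson_export():
    import sys
    src = SpikeSourcePoisson(id='poissonInput0', rate='50Hz', start='0ms',
                             duration='500ms')
    src.export(sys.stdout, 0, name_='spikeSourcePoisson')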
class InputList(Base):
"""Subject to change as it gets tested with LEMS"""
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, component=None, population=None, input=None):
super(InputList, self).__init__(id, neuroLexId, )
self.component = _cast(None, component)
self.population = _cast(None, population)
if input is None:
self.input = []
else:
self.input = input
def factory(*args_, **kwargs_):
if InputList.subclass:
return InputList.subclass(*args_, **kwargs_)
else:
return InputList(*args_, **kwargs_)
factory = staticmethod(factory)
def get_input(self): return self.input
def set_input(self, input): self.input = input
def add_input(self, value): self.input.append(value)
def insert_input(self, index, value): self.input[index] = value
def get_component(self): return self.component
def set_component(self, component): self.component = component
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_population(self): return self.population
def set_population(self, population): self.population = population
def hasContent_(self):
if (
self.input or
super(InputList, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='InputList', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='InputList')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InputList'):
super(InputList, self).exportAttributes(outfile, level, already_processed, namespace_, name_='InputList')
if self.component is not None and 'component' not in already_processed:
already_processed.add('component')
outfile.write(' component=%s' % (quote_attrib(self.component), ))
if self.population is not None and 'population' not in already_processed:
already_processed.add('population')
outfile.write(' population=%s' % (quote_attrib(self.population), ))
def exportChildren(self, outfile, level, namespace_='', name_='InputList', fromsubclass_=False, pretty_print=True):
super(InputList, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for input_ in self.input:
input_.export(outfile, level, namespace_, name_='input', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='InputList'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.component is not None and 'component' not in already_processed:
already_processed.add('component')
showIndent(outfile, level)
outfile.write('component="%s",\n' % (self.component,))
if self.population is not None and 'population' not in already_processed:
already_processed.add('population')
showIndent(outfile, level)
outfile.write('population="%s",\n' % (self.population,))
super(InputList, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(InputList, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('input=[\n')
level += 1
for input_ in self.input:
showIndent(outfile, level)
outfile.write('model_.Input(\n')
input_.exportLiteral(outfile, level, name_='Input')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('component', node)
if value is not None and 'component' not in already_processed:
already_processed.add('component')
self.component = value
self.validate_NmlId(self.component) # validate type NmlId
value = find_attr_value_('population', node)
if value is not None and 'population' not in already_processed:
already_processed.add('population')
self.population = value
self.validate_NmlId(self.population) # validate type NmlId
super(InputList, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'input':
obj_ = Input.factory()
obj_.build(child_)
self.input.append(obj_)
super(InputList, self).buildChildren(child_, node, nodeName_, True)
# end class InputList
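# Illustrative usage sketch (hand-written, not schema-generated): InputList
# aggregates Input children via add_input(), and export() nests the <input/>
# elements inside the <inputList> element. The ids, component and population
# names below are example values only.
def _example_input_list_export():
    import sys
    il = InputList(id='inputs0', component='pulseGen1', population='pop0')
    il.add_input(Input(id=0, target='../pop0/0/cell', destination='synapses'))
    il.export(sys.stdout, 0, name_='inputList')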
class Projection(Base):
"""Subject to change as it gets tested with LEMS"""
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, postsynapticPopulation=None, presynapticPopulation=None, synapse=None, connection=None):
super(Projection, self).__init__(id, neuroLexId, )
self.postsynapticPopulation = _cast(None, postsynapticPopulation)
self.presynapticPopulation = _cast(None, presynapticPopulation)
self.synapse = _cast(None, synapse)
if connection is None:
self.connection = []
else:
self.connection = connection
def factory(*args_, **kwargs_):
if Projection.subclass:
return Projection.subclass(*args_, **kwargs_)
else:
return Projection(*args_, **kwargs_)
factory = staticmethod(factory)
def get_connection(self): return self.connection
def set_connection(self, connection): self.connection = connection
def add_connection(self, value): self.connection.append(value)
def insert_connection(self, index, value): self.connection[index] = value
def get_postsynapticPopulation(self): return self.postsynapticPopulation
def set_postsynapticPopulation(self, postsynapticPopulation): self.postsynapticPopulation = postsynapticPopulation
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_presynapticPopulation(self): return self.presynapticPopulation
def set_presynapticPopulation(self, presynapticPopulation): self.presynapticPopulation = presynapticPopulation
def get_synapse(self): return self.synapse
def set_synapse(self, synapse): self.synapse = synapse
def hasContent_(self):
if (
self.connection or
super(Projection, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Projection', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Projection')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Projection'):
super(Projection, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Projection')
if self.postsynapticPopulation is not None and 'postsynapticPopulation' not in already_processed:
already_processed.add('postsynapticPopulation')
outfile.write(' postsynapticPopulation=%s' % (quote_attrib(self.postsynapticPopulation), ))
if self.presynapticPopulation is not None and 'presynapticPopulation' not in already_processed:
already_processed.add('presynapticPopulation')
outfile.write(' presynapticPopulation=%s' % (quote_attrib(self.presynapticPopulation), ))
if self.synapse is not None and 'synapse' not in already_processed:
already_processed.add('synapse')
outfile.write(' synapse=%s' % (quote_attrib(self.synapse), ))
def exportChildren(self, outfile, level, namespace_='', name_='Projection', fromsubclass_=False, pretty_print=True):
super(Projection, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for connection_ in self.connection:
connection_.export(outfile, level, namespace_, name_='connection', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Projection'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.postsynapticPopulation is not None and 'postsynapticPopulation' not in already_processed:
already_processed.add('postsynapticPopulation')
showIndent(outfile, level)
outfile.write('postsynapticPopulation="%s",\n' % (self.postsynapticPopulation,))
if self.presynapticPopulation is not None and 'presynapticPopulation' not in already_processed:
already_processed.add('presynapticPopulation')
showIndent(outfile, level)
outfile.write('presynapticPopulation="%s",\n' % (self.presynapticPopulation,))
if self.synapse is not None and 'synapse' not in already_processed:
already_processed.add('synapse')
showIndent(outfile, level)
outfile.write('synapse="%s",\n' % (self.synapse,))
super(Projection, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Projection, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('connection=[\n')
level += 1
for connection_ in self.connection:
showIndent(outfile, level)
outfile.write('model_.Connection(\n')
connection_.exportLiteral(outfile, level, name_='Connection')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('postsynapticPopulation', node)
if value is not None and 'postsynapticPopulation' not in already_processed:
already_processed.add('postsynapticPopulation')
self.postsynapticPopulation = value
self.validate_NmlId(self.postsynapticPopulation) # validate type NmlId
value = find_attr_value_('presynapticPopulation', node)
if value is not None and 'presynapticPopulation' not in already_processed:
already_processed.add('presynapticPopulation')
self.presynapticPopulation = value
self.validate_NmlId(self.presynapticPopulation) # validate type NmlId
value = find_attr_value_('synapse', node)
if value is not None and 'synapse' not in already_processed:
already_processed.add('synapse')
self.synapse = value
self.validate_NmlId(self.synapse) # validate type NmlId
super(Projection, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'connection':
obj_ = Connection.factory()
obj_.build(child_)
self.connection.append(obj_)
super(Projection, self).buildChildren(child_, node, nodeName_, True)
# end class Projection
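# Illustrative usage sketch for the Projection class above (ids are
# hypothetical, not taken from any schema): a Projection ties a presynaptic
# and a postsynaptic population together through a synapse id and collects
# Connection children.
#     proj = Projection.factory()
#     proj.set_presynapticPopulation('popA')
#     proj.set_postsynapticPopulation('popB')
#     proj.set_synapse('syn0')
#     proj.add_connection(Connection.factory())
# CellSet below wraps a 'select' expression plus arbitrary child elements
# (anytypeobjs_).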
class CellSet(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, select=None, anytypeobjs_=None):
super(CellSet, self).__init__(id, neuroLexId, )
self.select = _cast(None, select)
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if CellSet.subclass:
return CellSet.subclass(*args_, **kwargs_)
else:
return CellSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
def get_select(self): return self.select
def set_select(self, select): self.select = select
def hasContent_(self):
if (
self.anytypeobjs_ or
super(CellSet, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='CellSet', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='CellSet')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CellSet'):
super(CellSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='CellSet')
if self.select is not None and 'select' not in already_processed:
already_processed.add('select')
outfile.write(' select=%s' % (self.gds_format_string(quote_attrib(self.select).encode(ExternalEncoding), input_name='select'), ))
def exportChildren(self, outfile, level, namespace_='', name_='CellSet', fromsubclass_=False, pretty_print=True):
super(CellSet, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for obj_ in self.anytypeobjs_:
obj_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='CellSet'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.select is not None and 'select' not in already_processed:
already_processed.add('select')
showIndent(outfile, level)
outfile.write('select="%s",\n' % (self.select,))
super(CellSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(CellSet, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('anytypeobjs_=[\n')
level += 1
for anytypeobjs_ in self.anytypeobjs_:
anytypeobjs_.exportLiteral(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('select', node)
if value is not None and 'select' not in already_processed:
already_processed.add('select')
self.select = value
super(CellSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
obj_ = self.gds_build_any(child_, 'CellSet')
if obj_ is not None:
self.add_anytypeobjs_(obj_)
super(CellSet, self).buildChildren(child_, node, nodeName_, True)
# end class CellSet
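# Population below represents a set of cells in a network: a component/cell
# reference, an optional population type and integer size, and either a Layout
# child or an explicit list of Instance children, as its attributes and
# buildChildren handlers suggest.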
class Population(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, extracellularProperties=None, network=None, component=None, cell=None, type_=None, size=None, layout=None, instance=None):
super(Population, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.extracellularProperties = _cast(None, extracellularProperties)
self.network = _cast(None, network)
self.component = _cast(None, component)
self.cell = _cast(None, cell)
self.type_ = _cast(None, type_)
self.size = _cast(int, size)
self.layout = layout
if instance is None:
self.instance = []
else:
self.instance = instance
def factory(*args_, **kwargs_):
if Population.subclass:
return Population.subclass(*args_, **kwargs_)
else:
return Population(*args_, **kwargs_)
factory = staticmethod(factory)
def get_layout(self): return self.layout
def set_layout(self, layout): self.layout = layout
def get_instance(self): return self.instance
def set_instance(self, instance): self.instance = instance
def add_instance(self, value): self.instance.append(value)
def insert_instance(self, index, value): self.instance[index] = value
def get_extracellularProperties(self): return self.extracellularProperties
def set_extracellularProperties(self, extracellularProperties): self.extracellularProperties = extracellularProperties
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_network(self): return self.network
def set_network(self, network): self.network = network
def get_component(self): return self.component
def set_component(self, component): self.component = component
def get_cell(self): return self.cell
def set_cell(self, cell): self.cell = cell
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_populationTypes(self, value):
# Validate type populationTypes, a restriction on xs:string.
pass
def get_size(self): return self.size
def set_size(self, size): self.size = size
def hasContent_(self):
if (
self.layout is not None or
self.instance or
super(Population, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Population', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Population')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Population'):
super(Population, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Population')
if self.extracellularProperties is not None and 'extracellularProperties' not in already_processed:
already_processed.add('extracellularProperties')
outfile.write(' extracellularProperties=%s' % (quote_attrib(self.extracellularProperties), ))
if self.network is not None and 'network' not in already_processed:
already_processed.add('network')
outfile.write(' network=%s' % (quote_attrib(self.network), ))
if self.component is not None and 'component' not in already_processed:
already_processed.add('component')
outfile.write(' component=%s' % (quote_attrib(self.component), ))
if self.cell is not None and 'cell' not in already_processed:
already_processed.add('cell')
outfile.write(' cell=%s' % (quote_attrib(self.cell), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
if self.size is not None and 'size' not in already_processed:
already_processed.add('size')
outfile.write(' size="%s"' % self.gds_format_integer(self.size, input_name='size'))
def exportChildren(self, outfile, level, namespace_='', name_='Population', fromsubclass_=False, pretty_print=True):
super(Population, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.layout is not None:
self.layout.export(outfile, level, namespace_, name_='layout', pretty_print=pretty_print)
for instance_ in self.instance:
instance_.export(outfile, level, namespace_, name_='instance', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Population'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.extracellularProperties is not None and 'extracellularProperties' not in already_processed:
already_processed.add('extracellularProperties')
showIndent(outfile, level)
outfile.write('extracellularProperties="%s",\n' % (self.extracellularProperties,))
if self.network is not None and 'network' not in already_processed:
already_processed.add('network')
showIndent(outfile, level)
outfile.write('network="%s",\n' % (self.network,))
if self.component is not None and 'component' not in already_processed:
already_processed.add('component')
showIndent(outfile, level)
outfile.write('component="%s",\n' % (self.component,))
if self.cell is not None and 'cell' not in already_processed:
already_processed.add('cell')
showIndent(outfile, level)
outfile.write('cell="%s",\n' % (self.cell,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.size is not None and 'size' not in already_processed:
already_processed.add('size')
showIndent(outfile, level)
outfile.write('size=%d,\n' % (self.size,))
super(Population, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Population, self).exportLiteralChildren(outfile, level, name_)
if self.layout is not None:
showIndent(outfile, level)
outfile.write('layout=model_.Layout(\n')
self.layout.exportLiteral(outfile, level, name_='layout')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('instance=[\n')
level += 1
for instance_ in self.instance:
showIndent(outfile, level)
outfile.write('model_.Instance(\n')
instance_.exportLiteral(outfile, level, name_='Instance')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('extracellularProperties', node)
if value is not None and 'extracellularProperties' not in already_processed:
already_processed.add('extracellularProperties')
self.extracellularProperties = value
self.validate_NmlId(self.extracellularProperties) # validate type NmlId
value = find_attr_value_('network', node)
if value is not None and 'network' not in already_processed:
already_processed.add('network')
self.network = value
self.validate_NmlId(self.network) # validate type NmlId
value = find_attr_value_('component', node)
if value is not None and 'component' not in already_processed:
already_processed.add('component')
self.component = value
self.validate_NmlId(self.component) # validate type NmlId
value = find_attr_value_('cell', node)
if value is not None and 'cell' not in already_processed:
already_processed.add('cell')
self.cell = value
self.validate_NmlId(self.cell) # validate type NmlId
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_populationTypes(self.type_) # validate type populationTypes
value = find_attr_value_('size', node)
if value is not None and 'size' not in already_processed:
already_processed.add('size')
try:
self.size = int(value)
            except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
super(Population, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'layout':
obj_ = Layout.factory()
obj_.build(child_)
self.set_layout(obj_)
elif nodeName_ == 'instance':
obj_ = Instance.factory()
obj_.build(child_)
self.instance.append(obj_)
super(Population, self).buildChildren(child_, node, nodeName_, True)
# end class Population
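# Region below is a named area of the network tied to a Space via its 'space'
# attribute; like CellSet it also accepts arbitrary child elements.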
class Region(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, space=None, anytypeobjs_=None):
super(Region, self).__init__(id, neuroLexId, )
self.space = _cast(None, space)
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if Region.subclass:
return Region.subclass(*args_, **kwargs_)
else:
return Region(*args_, **kwargs_)
factory = staticmethod(factory)
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
def get_space(self): return self.space
def set_space(self, space): self.space = space
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.anytypeobjs_ or
super(Region, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Region', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Region')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Region'):
super(Region, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Region')
if self.space is not None and 'space' not in already_processed:
already_processed.add('space')
outfile.write(' space=%s' % (quote_attrib(self.space), ))
def exportChildren(self, outfile, level, namespace_='', name_='Region', fromsubclass_=False, pretty_print=True):
super(Region, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for obj_ in self.anytypeobjs_:
obj_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Region'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.space is not None and 'space' not in already_processed:
already_processed.add('space')
showIndent(outfile, level)
outfile.write('space="%s",\n' % (self.space,))
super(Region, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Region, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('anytypeobjs_=[\n')
level += 1
for anytypeobjs_ in self.anytypeobjs_:
anytypeobjs_.exportLiteral(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('space', node)
if value is not None and 'space' not in already_processed:
already_processed.add('space')
self.space = value
self.validate_NmlId(self.space) # validate type NmlId
super(Region, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
obj_ = self.gds_build_any(child_, 'Region')
if obj_ is not None:
self.add_anytypeobjs_(obj_)
super(Region, self).buildChildren(child_, node, nodeName_, True)
# end class Region
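# Space below declares a coordinate space: a 'basedOn' attribute restricted to
# the allowedSpaces type and an optional nested SpaceStructure child.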
class Space(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, basedOn=None, structure=None):
super(Space, self).__init__(id, neuroLexId, )
self.basedOn = _cast(None, basedOn)
self.structure = structure
def factory(*args_, **kwargs_):
if Space.subclass:
return Space.subclass(*args_, **kwargs_)
else:
return Space(*args_, **kwargs_)
factory = staticmethod(factory)
def get_structure(self): return self.structure
def set_structure(self, structure): self.structure = structure
def get_basedOn(self): return self.basedOn
def set_basedOn(self, basedOn): self.basedOn = basedOn
def validate_allowedSpaces(self, value):
# Validate type allowedSpaces, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.structure is not None or
super(Space, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Space', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Space')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Space'):
super(Space, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Space')
if self.basedOn is not None and 'basedOn' not in already_processed:
already_processed.add('basedOn')
outfile.write(' basedOn=%s' % (quote_attrib(self.basedOn), ))
def exportChildren(self, outfile, level, namespace_='', name_='Space', fromsubclass_=False, pretty_print=True):
super(Space, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.structure is not None:
self.structure.export(outfile, level, namespace_, name_='structure', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Space'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.basedOn is not None and 'basedOn' not in already_processed:
already_processed.add('basedOn')
showIndent(outfile, level)
outfile.write('basedOn="%s",\n' % (self.basedOn,))
super(Space, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Space, self).exportLiteralChildren(outfile, level, name_)
if self.structure is not None:
showIndent(outfile, level)
outfile.write('structure=model_.SpaceStructure(\n')
self.structure.exportLiteral(outfile, level, name_='structure')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('basedOn', node)
if value is not None and 'basedOn' not in already_processed:
already_processed.add('basedOn')
self.basedOn = value
self.validate_allowedSpaces(self.basedOn) # validate type allowedSpaces
super(Space, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'structure':
obj_ = SpaceStructure.factory()
obj_.build(child_)
self.set_structure(obj_)
super(Space, self).buildChildren(child_, node, nodeName_, True)
# end class Space
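# Network below is the top-level container for spaces, regions, populations,
# cell sets, synaptic connections, projections and inputs. A minimal, purely
# illustrative sketch of assembling one in memory (values are hypothetical):
#     net = Network.factory()
#     pop = Population.factory()
#     pop.set_size(10)
#     net.add_population(pop)
#     net.add_projection(Projection.factory())
# Only methods defined on the classes in this module are used above.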
class Network(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, space=None, region=None, extracellularProperties=None, population=None, cellSet=None, synapticConnection=None, projection=None, explicitInput=None, inputList=None):
super(Network, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
if space is None:
self.space = []
else:
self.space = space
if region is None:
self.region = []
else:
self.region = region
if extracellularProperties is None:
self.extracellularProperties = []
else:
self.extracellularProperties = extracellularProperties
if population is None:
self.population = []
else:
self.population = population
if cellSet is None:
self.cellSet = []
else:
self.cellSet = cellSet
if synapticConnection is None:
self.synapticConnection = []
else:
self.synapticConnection = synapticConnection
if projection is None:
self.projection = []
else:
self.projection = projection
if explicitInput is None:
self.explicitInput = []
else:
self.explicitInput = explicitInput
if inputList is None:
self.inputList = []
else:
self.inputList = inputList
def factory(*args_, **kwargs_):
if Network.subclass:
return Network.subclass(*args_, **kwargs_)
else:
return Network(*args_, **kwargs_)
factory = staticmethod(factory)
def get_space(self): return self.space
def set_space(self, space): self.space = space
def add_space(self, value): self.space.append(value)
def insert_space(self, index, value): self.space[index] = value
def get_region(self): return self.region
def set_region(self, region): self.region = region
def add_region(self, value): self.region.append(value)
def insert_region(self, index, value): self.region[index] = value
def get_extracellularProperties(self): return self.extracellularProperties
def set_extracellularProperties(self, extracellularProperties): self.extracellularProperties = extracellularProperties
def add_extracellularProperties(self, value): self.extracellularProperties.append(value)
def insert_extracellularProperties(self, index, value): self.extracellularProperties[index] = value
def get_population(self): return self.population
def set_population(self, population): self.population = population
def add_population(self, value): self.population.append(value)
def insert_population(self, index, value): self.population[index] = value
def get_cellSet(self): return self.cellSet
def set_cellSet(self, cellSet): self.cellSet = cellSet
def add_cellSet(self, value): self.cellSet.append(value)
def insert_cellSet(self, index, value): self.cellSet[index] = value
def get_synapticConnection(self): return self.synapticConnection
def set_synapticConnection(self, synapticConnection): self.synapticConnection = synapticConnection
def add_synapticConnection(self, value): self.synapticConnection.append(value)
def insert_synapticConnection(self, index, value): self.synapticConnection[index] = value
def get_projection(self): return self.projection
def set_projection(self, projection): self.projection = projection
def add_projection(self, value): self.projection.append(value)
def insert_projection(self, index, value): self.projection[index] = value
def get_explicitInput(self): return self.explicitInput
def set_explicitInput(self, explicitInput): self.explicitInput = explicitInput
def add_explicitInput(self, value): self.explicitInput.append(value)
def insert_explicitInput(self, index, value): self.explicitInput[index] = value
def get_inputList(self): return self.inputList
def set_inputList(self, inputList): self.inputList = inputList
def add_inputList(self, value): self.inputList.append(value)
def insert_inputList(self, index, value): self.inputList[index] = value
def hasContent_(self):
if (
self.space or
self.region or
self.extracellularProperties or
self.population or
self.cellSet or
self.synapticConnection or
self.projection or
self.explicitInput or
self.inputList or
super(Network, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Network', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Network')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Network'):
super(Network, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Network')
def exportChildren(self, outfile, level, namespace_='', name_='Network', fromsubclass_=False, pretty_print=True):
super(Network, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for space_ in self.space:
space_.export(outfile, level, namespace_, name_='space', pretty_print=pretty_print)
for region_ in self.region:
region_.export(outfile, level, namespace_, name_='region', pretty_print=pretty_print)
for extracellularProperties_ in self.extracellularProperties:
extracellularProperties_.export(outfile, level, namespace_, name_='extracellularProperties', pretty_print=pretty_print)
for population_ in self.population:
population_.export(outfile, level, namespace_, name_='population', pretty_print=pretty_print)
for cellSet_ in self.cellSet:
cellSet_.export(outfile, level, namespace_, name_='cellSet', pretty_print=pretty_print)
for synapticConnection_ in self.synapticConnection:
synapticConnection_.export(outfile, level, namespace_, name_='synapticConnection', pretty_print=pretty_print)
for projection_ in self.projection:
projection_.export(outfile, level, namespace_, name_='projection', pretty_print=pretty_print)
for explicitInput_ in self.explicitInput:
explicitInput_.export(outfile, level, namespace_, name_='explicitInput', pretty_print=pretty_print)
for inputList_ in self.inputList:
inputList_.export(outfile, level, namespace_, name_='inputList', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Network'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Network, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Network, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('space=[\n')
level += 1
for space_ in self.space:
showIndent(outfile, level)
outfile.write('model_.Space(\n')
space_.exportLiteral(outfile, level, name_='Space')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('region=[\n')
level += 1
for region_ in self.region:
showIndent(outfile, level)
outfile.write('model_.Region(\n')
region_.exportLiteral(outfile, level, name_='Region')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('extracellularProperties=[\n')
level += 1
for extracellularProperties_ in self.extracellularProperties:
showIndent(outfile, level)
outfile.write('model_.ExtracellularPropertiesLocal(\n')
extracellularProperties_.exportLiteral(outfile, level, name_='ExtracellularPropertiesLocal')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('population=[\n')
level += 1
for population_ in self.population:
showIndent(outfile, level)
outfile.write('model_.Population(\n')
population_.exportLiteral(outfile, level, name_='Population')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('cellSet=[\n')
level += 1
for cellSet_ in self.cellSet:
showIndent(outfile, level)
outfile.write('model_.CellSet(\n')
cellSet_.exportLiteral(outfile, level, name_='CellSet')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('synapticConnection=[\n')
level += 1
for synapticConnection_ in self.synapticConnection:
showIndent(outfile, level)
outfile.write('model_.SynapticConnection(\n')
synapticConnection_.exportLiteral(outfile, level, name_='SynapticConnection')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('projection=[\n')
level += 1
for projection_ in self.projection:
showIndent(outfile, level)
outfile.write('model_.Projection(\n')
projection_.exportLiteral(outfile, level, name_='Projection')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('explicitInput=[\n')
level += 1
for explicitInput_ in self.explicitInput:
showIndent(outfile, level)
outfile.write('model_.ExplicitInput(\n')
explicitInput_.exportLiteral(outfile, level, name_='ExplicitInput')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('inputList=[\n')
level += 1
for inputList_ in self.inputList:
showIndent(outfile, level)
outfile.write('model_.InputList(\n')
inputList_.exportLiteral(outfile, level, name_='InputList')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(Network, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'space':
obj_ = Space.factory()
obj_.build(child_)
self.space.append(obj_)
elif nodeName_ == 'region':
obj_ = Region.factory()
obj_.build(child_)
self.region.append(obj_)
elif nodeName_ == 'extracellularProperties':
obj_ = ExtracellularPropertiesLocal.factory()
obj_.build(child_)
self.extracellularProperties.append(obj_)
elif nodeName_ == 'population':
obj_ = Population.factory()
obj_.build(child_)
self.population.append(obj_)
elif nodeName_ == 'cellSet':
obj_ = CellSet.factory()
obj_.build(child_)
self.cellSet.append(obj_)
elif nodeName_ == 'synapticConnection':
obj_ = SynapticConnection.factory()
obj_.build(child_)
self.synapticConnection.append(obj_)
elif nodeName_ == 'projection':
obj_ = Projection.factory()
obj_.build(child_)
self.projection.append(obj_)
elif nodeName_ == 'explicitInput':
obj_ = ExplicitInput.factory()
obj_.build(child_)
self.explicitInput.append(obj_)
elif nodeName_ == 'inputList':
obj_ = InputList.factory()
obj_.build(child_)
self.inputList.append(obj_)
super(Network, self).buildChildren(child_, node, nodeName_, True)
# end class Network
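# SpikeGeneratorPoisson below models a spike source firing at an averageRate,
# an Nml2Quantity_pertime string (e.g. '50Hz'; the value is illustrative only).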
class SpikeGeneratorPoisson(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, averageRate=None):
super(SpikeGeneratorPoisson, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.averageRate = _cast(None, averageRate)
pass
def factory(*args_, **kwargs_):
if SpikeGeneratorPoisson.subclass:
return SpikeGeneratorPoisson.subclass(*args_, **kwargs_)
else:
return SpikeGeneratorPoisson(*args_, **kwargs_)
factory = staticmethod(factory)
def get_averageRate(self): return self.averageRate
def set_averageRate(self, averageRate): self.averageRate = averageRate
def validate_Nml2Quantity_pertime(self, value):
# Validate type Nml2Quantity_pertime, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(SpikeGeneratorPoisson, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SpikeGeneratorPoisson', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SpikeGeneratorPoisson')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SpikeGeneratorPoisson'):
super(SpikeGeneratorPoisson, self).exportAttributes(outfile, level, already_processed, namespace_, name_='SpikeGeneratorPoisson')
if self.averageRate is not None and 'averageRate' not in already_processed:
already_processed.add('averageRate')
outfile.write(' averageRate=%s' % (quote_attrib(self.averageRate), ))
def exportChildren(self, outfile, level, namespace_='', name_='SpikeGeneratorPoisson', fromsubclass_=False, pretty_print=True):
super(SpikeGeneratorPoisson, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='SpikeGeneratorPoisson'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.averageRate is not None and 'averageRate' not in already_processed:
already_processed.add('averageRate')
showIndent(outfile, level)
outfile.write('averageRate="%s",\n' % (self.averageRate,))
super(SpikeGeneratorPoisson, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(SpikeGeneratorPoisson, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('averageRate', node)
if value is not None and 'averageRate' not in already_processed:
already_processed.add('averageRate')
self.averageRate = value
self.validate_Nml2Quantity_pertime(self.averageRate) # validate type Nml2Quantity_pertime
super(SpikeGeneratorPoisson, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SpikeGeneratorPoisson, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SpikeGeneratorPoisson
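# SpikeGeneratorRandom below describes a spike source whose inter-spike
# interval varies between minISI and maxISI, both Nml2Quantity_time strings.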
class SpikeGeneratorRandom(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, minISI=None, maxISI=None):
super(SpikeGeneratorRandom, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.minISI = _cast(None, minISI)
self.maxISI = _cast(None, maxISI)
pass
def factory(*args_, **kwargs_):
if SpikeGeneratorRandom.subclass:
return SpikeGeneratorRandom.subclass(*args_, **kwargs_)
else:
return SpikeGeneratorRandom(*args_, **kwargs_)
factory = staticmethod(factory)
def get_minISI(self): return self.minISI
def set_minISI(self, minISI): self.minISI = minISI
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_maxISI(self): return self.maxISI
def set_maxISI(self, maxISI): self.maxISI = maxISI
def hasContent_(self):
if (
super(SpikeGeneratorRandom, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SpikeGeneratorRandom', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SpikeGeneratorRandom')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SpikeGeneratorRandom'):
super(SpikeGeneratorRandom, self).exportAttributes(outfile, level, already_processed, namespace_, name_='SpikeGeneratorRandom')
if self.minISI is not None and 'minISI' not in already_processed:
already_processed.add('minISI')
outfile.write(' minISI=%s' % (quote_attrib(self.minISI), ))
if self.maxISI is not None and 'maxISI' not in already_processed:
already_processed.add('maxISI')
outfile.write(' maxISI=%s' % (quote_attrib(self.maxISI), ))
def exportChildren(self, outfile, level, namespace_='', name_='SpikeGeneratorRandom', fromsubclass_=False, pretty_print=True):
super(SpikeGeneratorRandom, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='SpikeGeneratorRandom'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.minISI is not None and 'minISI' not in already_processed:
already_processed.add('minISI')
showIndent(outfile, level)
outfile.write('minISI="%s",\n' % (self.minISI,))
if self.maxISI is not None and 'maxISI' not in already_processed:
already_processed.add('maxISI')
showIndent(outfile, level)
outfile.write('maxISI="%s",\n' % (self.maxISI,))
super(SpikeGeneratorRandom, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(SpikeGeneratorRandom, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('minISI', node)
if value is not None and 'minISI' not in already_processed:
already_processed.add('minISI')
self.minISI = value
self.validate_Nml2Quantity_time(self.minISI) # validate type Nml2Quantity_time
value = find_attr_value_('maxISI', node)
if value is not None and 'maxISI' not in already_processed:
already_processed.add('maxISI')
self.maxISI = value
self.validate_Nml2Quantity_time(self.maxISI) # validate type Nml2Quantity_time
super(SpikeGeneratorRandom, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SpikeGeneratorRandom, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SpikeGeneratorRandom
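# SpikeGenerator below describes a regular spike source with a fixed 'period'
# attribute validated as Nml2Quantity_time.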
class SpikeGenerator(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, period=None):
super(SpikeGenerator, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.period = _cast(None, period)
pass
def factory(*args_, **kwargs_):
if SpikeGenerator.subclass:
return SpikeGenerator.subclass(*args_, **kwargs_)
else:
return SpikeGenerator(*args_, **kwargs_)
factory = staticmethod(factory)
def get_period(self): return self.period
def set_period(self, period): self.period = period
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(SpikeGenerator, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SpikeGenerator', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SpikeGenerator')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SpikeGenerator'):
super(SpikeGenerator, self).exportAttributes(outfile, level, already_processed, namespace_, name_='SpikeGenerator')
if self.period is not None and 'period' not in already_processed:
already_processed.add('period')
outfile.write(' period=%s' % (quote_attrib(self.period), ))
def exportChildren(self, outfile, level, namespace_='', name_='SpikeGenerator', fromsubclass_=False, pretty_print=True):
super(SpikeGenerator, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='SpikeGenerator'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.period is not None and 'period' not in already_processed:
already_processed.add('period')
showIndent(outfile, level)
outfile.write('period="%s",\n' % (self.period,))
super(SpikeGenerator, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(SpikeGenerator, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('period', node)
if value is not None and 'period' not in already_processed:
already_processed.add('period')
self.period = value
self.validate_Nml2Quantity_time(self.period) # validate type Nml2Quantity_time
super(SpikeGenerator, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SpikeGenerator, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SpikeGenerator
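# SpikeArray below holds an explicit list of Spike children, each carrying a
# single emission time.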
class SpikeArray(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, spike=None):
super(SpikeArray, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
if spike is None:
self.spike = []
else:
self.spike = spike
def factory(*args_, **kwargs_):
if SpikeArray.subclass:
return SpikeArray.subclass(*args_, **kwargs_)
else:
return SpikeArray(*args_, **kwargs_)
factory = staticmethod(factory)
def get_spike(self): return self.spike
def set_spike(self, spike): self.spike = spike
def add_spike(self, value): self.spike.append(value)
def insert_spike(self, index, value): self.spike[index] = value
def hasContent_(self):
if (
self.spike or
super(SpikeArray, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SpikeArray', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SpikeArray')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SpikeArray'):
super(SpikeArray, self).exportAttributes(outfile, level, already_processed, namespace_, name_='SpikeArray')
def exportChildren(self, outfile, level, namespace_='', name_='SpikeArray', fromsubclass_=False, pretty_print=True):
super(SpikeArray, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for spike_ in self.spike:
spike_.export(outfile, level, namespace_, name_='spike', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='SpikeArray'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(SpikeArray, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(SpikeArray, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('spike=[\n')
level += 1
for spike_ in self.spike:
showIndent(outfile, level)
outfile.write('model_.Spike(\n')
spike_.exportLiteral(outfile, level, name_='Spike')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(SpikeArray, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'spike':
obj_ = Spike.factory()
obj_.build(child_)
self.spike.append(obj_)
super(SpikeArray, self).buildChildren(child_, node, nodeName_, True)
# end class SpikeArray
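# Spike below is a single spike event whose 'time' attribute is validated as
# Nml2Quantity_time.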
class Spike(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, time=None):
super(Spike, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.time = _cast(None, time)
pass
def factory(*args_, **kwargs_):
if Spike.subclass:
return Spike.subclass(*args_, **kwargs_)
else:
return Spike(*args_, **kwargs_)
factory = staticmethod(factory)
def get_time(self): return self.time
def set_time(self, time): self.time = time
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(Spike, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Spike', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Spike')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Spike'):
super(Spike, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Spike')
if self.time is not None and 'time' not in already_processed:
already_processed.add('time')
outfile.write(' time=%s' % (quote_attrib(self.time), ))
def exportChildren(self, outfile, level, namespace_='', name_='Spike', fromsubclass_=False, pretty_print=True):
super(Spike, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Spike'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.time is not None and 'time' not in already_processed:
already_processed.add('time')
showIndent(outfile, level)
outfile.write('time="%s",\n' % (self.time,))
super(Spike, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Spike, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('time', node)
if value is not None and 'time' not in already_processed:
already_processed.add('time')
self.time = value
self.validate_Nml2Quantity_time(self.time) # validate type Nml2Quantity_time
super(Spike, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(Spike, self).buildChildren(child_, node, nodeName_, True)
pass
# end class Spike
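# VoltageClamp below models a voltage-clamp input with delay, duration,
# seriesResistance and targetVoltage attributes, each validated against the
# corresponding Nml2Quantity_* string type.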
class VoltageClamp(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, delay=None, duration=None, seriesResistance=None, targetVoltage=None):
super(VoltageClamp, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.delay = _cast(None, delay)
self.duration = _cast(None, duration)
self.seriesResistance = _cast(None, seriesResistance)
self.targetVoltage = _cast(None, targetVoltage)
pass
def factory(*args_, **kwargs_):
if VoltageClamp.subclass:
return VoltageClamp.subclass(*args_, **kwargs_)
else:
return VoltageClamp(*args_, **kwargs_)
factory = staticmethod(factory)
def get_delay(self): return self.delay
def set_delay(self, delay): self.delay = delay
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_duration(self): return self.duration
def set_duration(self, duration): self.duration = duration
def get_seriesResistance(self): return self.seriesResistance
def set_seriesResistance(self, seriesResistance): self.seriesResistance = seriesResistance
def validate_Nml2Quantity_resistance(self, value):
# Validate type Nml2Quantity_resistance, a restriction on xs:string.
pass
def get_targetVoltage(self): return self.targetVoltage
def set_targetVoltage(self, targetVoltage): self.targetVoltage = targetVoltage
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(VoltageClamp, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='VoltageClamp', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='VoltageClamp')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VoltageClamp'):
super(VoltageClamp, self).exportAttributes(outfile, level, already_processed, namespace_, name_='VoltageClamp')
if self.delay is not None and 'delay' not in already_processed:
already_processed.add('delay')
outfile.write(' delay=%s' % (quote_attrib(self.delay), ))
if self.duration is not None and 'duration' not in already_processed:
already_processed.add('duration')
outfile.write(' duration=%s' % (quote_attrib(self.duration), ))
if self.seriesResistance is not None and 'seriesResistance' not in already_processed:
already_processed.add('seriesResistance')
outfile.write(' seriesResistance=%s' % (quote_attrib(self.seriesResistance), ))
if self.targetVoltage is not None and 'targetVoltage' not in already_processed:
already_processed.add('targetVoltage')
outfile.write(' targetVoltage=%s' % (quote_attrib(self.targetVoltage), ))
def exportChildren(self, outfile, level, namespace_='', name_='VoltageClamp', fromsubclass_=False, pretty_print=True):
super(VoltageClamp, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='VoltageClamp'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.delay is not None and 'delay' not in already_processed:
already_processed.add('delay')
showIndent(outfile, level)
outfile.write('delay="%s",\n' % (self.delay,))
if self.duration is not None and 'duration' not in already_processed:
already_processed.add('duration')
showIndent(outfile, level)
outfile.write('duration="%s",\n' % (self.duration,))
if self.seriesResistance is not None and 'seriesResistance' not in already_processed:
already_processed.add('seriesResistance')
showIndent(outfile, level)
outfile.write('seriesResistance="%s",\n' % (self.seriesResistance,))
if self.targetVoltage is not None and 'targetVoltage' not in already_processed:
already_processed.add('targetVoltage')
showIndent(outfile, level)
outfile.write('targetVoltage="%s",\n' % (self.targetVoltage,))
super(VoltageClamp, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(VoltageClamp, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('delay', node)
if value is not None and 'delay' not in already_processed:
already_processed.add('delay')
self.delay = value
self.validate_Nml2Quantity_time(self.delay) # validate type Nml2Quantity_time
value = find_attr_value_('duration', node)
if value is not None and 'duration' not in already_processed:
already_processed.add('duration')
self.duration = value
self.validate_Nml2Quantity_time(self.duration) # validate type Nml2Quantity_time
value = find_attr_value_('seriesResistance', node)
if value is not None and 'seriesResistance' not in already_processed:
already_processed.add('seriesResistance')
self.seriesResistance = value
self.validate_Nml2Quantity_resistance(self.seriesResistance) # validate type Nml2Quantity_resistance
value = find_attr_value_('targetVoltage', node)
if value is not None and 'targetVoltage' not in already_processed:
already_processed.add('targetVoltage')
self.targetVoltage = value
self.validate_Nml2Quantity_voltage(self.targetVoltage) # validate type Nml2Quantity_voltage
super(VoltageClamp, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(VoltageClamp, self).buildChildren(child_, node, nodeName_, True)
pass
# end class VoltageClamp
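# Illustrative usage sketch for VoltageClamp (not generated code). The id and
# the quantity strings below ("10ms", "1Mohm", "-60mV") are assumptions in the
# usual NeuroML 2 form; this class stores them as plain strings and the
# validate_* hooks above are no-ops.
#
#     vc = VoltageClamp(id='vClamp0', delay='10ms', duration='100ms',
#                       seriesResistance='1Mohm', targetVoltage='-60mV')
#     import sys
#     vc.export(sys.stdout, 0, name_='voltageClamp')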
class RampGenerator(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, delay=None, duration=None, baselineAmplitude=None, startAmplitude=None, finishAmplitude=None):
super(RampGenerator, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.delay = _cast(None, delay)
self.duration = _cast(None, duration)
self.baselineAmplitude = _cast(None, baselineAmplitude)
self.startAmplitude = _cast(None, startAmplitude)
self.finishAmplitude = _cast(None, finishAmplitude)
pass
def factory(*args_, **kwargs_):
if RampGenerator.subclass:
return RampGenerator.subclass(*args_, **kwargs_)
else:
return RampGenerator(*args_, **kwargs_)
factory = staticmethod(factory)
def get_delay(self): return self.delay
def set_delay(self, delay): self.delay = delay
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_duration(self): return self.duration
def set_duration(self, duration): self.duration = duration
def get_baselineAmplitude(self): return self.baselineAmplitude
def set_baselineAmplitude(self, baselineAmplitude): self.baselineAmplitude = baselineAmplitude
def validate_Nml2Quantity_current(self, value):
# Validate type Nml2Quantity_current, a restriction on xs:string.
pass
def get_startAmplitude(self): return self.startAmplitude
def set_startAmplitude(self, startAmplitude): self.startAmplitude = startAmplitude
def get_finishAmplitude(self): return self.finishAmplitude
def set_finishAmplitude(self, finishAmplitude): self.finishAmplitude = finishAmplitude
def hasContent_(self):
if (
super(RampGenerator, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='RampGenerator', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RampGenerator')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RampGenerator'):
super(RampGenerator, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RampGenerator')
if self.delay is not None and 'delay' not in already_processed:
already_processed.add('delay')
outfile.write(' delay=%s' % (quote_attrib(self.delay), ))
if self.duration is not None and 'duration' not in already_processed:
already_processed.add('duration')
outfile.write(' duration=%s' % (quote_attrib(self.duration), ))
if self.baselineAmplitude is not None and 'baselineAmplitude' not in already_processed:
already_processed.add('baselineAmplitude')
outfile.write(' baselineAmplitude=%s' % (quote_attrib(self.baselineAmplitude), ))
if self.startAmplitude is not None and 'startAmplitude' not in already_processed:
already_processed.add('startAmplitude')
outfile.write(' startAmplitude=%s' % (quote_attrib(self.startAmplitude), ))
if self.finishAmplitude is not None and 'finishAmplitude' not in already_processed:
already_processed.add('finishAmplitude')
outfile.write(' finishAmplitude=%s' % (quote_attrib(self.finishAmplitude), ))
def exportChildren(self, outfile, level, namespace_='', name_='RampGenerator', fromsubclass_=False, pretty_print=True):
super(RampGenerator, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='RampGenerator'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.delay is not None and 'delay' not in already_processed:
already_processed.add('delay')
showIndent(outfile, level)
outfile.write('delay="%s",\n' % (self.delay,))
if self.duration is not None and 'duration' not in already_processed:
already_processed.add('duration')
showIndent(outfile, level)
outfile.write('duration="%s",\n' % (self.duration,))
if self.baselineAmplitude is not None and 'baselineAmplitude' not in already_processed:
already_processed.add('baselineAmplitude')
showIndent(outfile, level)
outfile.write('baselineAmplitude="%s",\n' % (self.baselineAmplitude,))
if self.startAmplitude is not None and 'startAmplitude' not in already_processed:
already_processed.add('startAmplitude')
showIndent(outfile, level)
outfile.write('startAmplitude="%s",\n' % (self.startAmplitude,))
if self.finishAmplitude is not None and 'finishAmplitude' not in already_processed:
already_processed.add('finishAmplitude')
showIndent(outfile, level)
outfile.write('finishAmplitude="%s",\n' % (self.finishAmplitude,))
super(RampGenerator, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RampGenerator, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('delay', node)
if value is not None and 'delay' not in already_processed:
already_processed.add('delay')
self.delay = value
self.validate_Nml2Quantity_time(self.delay) # validate type Nml2Quantity_time
value = find_attr_value_('duration', node)
if value is not None and 'duration' not in already_processed:
already_processed.add('duration')
self.duration = value
self.validate_Nml2Quantity_time(self.duration) # validate type Nml2Quantity_time
value = find_attr_value_('baselineAmplitude', node)
if value is not None and 'baselineAmplitude' not in already_processed:
already_processed.add('baselineAmplitude')
self.baselineAmplitude = value
self.validate_Nml2Quantity_current(self.baselineAmplitude) # validate type Nml2Quantity_current
value = find_attr_value_('startAmplitude', node)
if value is not None and 'startAmplitude' not in already_processed:
already_processed.add('startAmplitude')
self.startAmplitude = value
self.validate_Nml2Quantity_current(self.startAmplitude) # validate type Nml2Quantity_current
value = find_attr_value_('finishAmplitude', node)
if value is not None and 'finishAmplitude' not in already_processed:
already_processed.add('finishAmplitude')
self.finishAmplitude = value
self.validate_Nml2Quantity_current(self.finishAmplitude) # validate type Nml2Quantity_current
super(RampGenerator, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(RampGenerator, self).buildChildren(child_, node, nodeName_, True)
pass
# end class RampGenerator
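# Illustrative usage sketch for RampGenerator (not generated code). All
# identifiers and quantity strings are assumptions; amplitudes use the
# NeuroML 2 current form (e.g. "0.1nA") and delays/durations the time form.
#
#     rg = RampGenerator(id='ramp0', delay='50ms', duration='200ms',
#                        baselineAmplitude='0nA', startAmplitude='0nA',
#                        finishAmplitude='0.1nA')
#     rg.export(sys.stdout, 0, name_='rampGenerator')       # needs: import sys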
class SineGenerator(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, delay=None, phase=None, duration=None, period=None, amplitude=None):
super(SineGenerator, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.delay = _cast(None, delay)
self.phase = _cast(None, phase)
self.duration = _cast(None, duration)
self.period = _cast(None, period)
self.amplitude = _cast(None, amplitude)
pass
def factory(*args_, **kwargs_):
if SineGenerator.subclass:
return SineGenerator.subclass(*args_, **kwargs_)
else:
return SineGenerator(*args_, **kwargs_)
factory = staticmethod(factory)
def get_delay(self): return self.delay
def set_delay(self, delay): self.delay = delay
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_phase(self): return self.phase
def set_phase(self, phase): self.phase = phase
def validate_Nml2Quantity_none(self, value):
# Validate type Nml2Quantity_none, a restriction on xs:string.
pass
def get_duration(self): return self.duration
def set_duration(self, duration): self.duration = duration
def get_period(self): return self.period
def set_period(self, period): self.period = period
def get_amplitude(self): return self.amplitude
def set_amplitude(self, amplitude): self.amplitude = amplitude
def validate_Nml2Quantity_current(self, value):
# Validate type Nml2Quantity_current, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(SineGenerator, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SineGenerator', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SineGenerator')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SineGenerator'):
super(SineGenerator, self).exportAttributes(outfile, level, already_processed, namespace_, name_='SineGenerator')
if self.delay is not None and 'delay' not in already_processed:
already_processed.add('delay')
outfile.write(' delay=%s' % (quote_attrib(self.delay), ))
if self.phase is not None and 'phase' not in already_processed:
already_processed.add('phase')
outfile.write(' phase=%s' % (quote_attrib(self.phase), ))
if self.duration is not None and 'duration' not in already_processed:
already_processed.add('duration')
outfile.write(' duration=%s' % (quote_attrib(self.duration), ))
if self.period is not None and 'period' not in already_processed:
already_processed.add('period')
outfile.write(' period=%s' % (quote_attrib(self.period), ))
if self.amplitude is not None and 'amplitude' not in already_processed:
already_processed.add('amplitude')
outfile.write(' amplitude=%s' % (quote_attrib(self.amplitude), ))
def exportChildren(self, outfile, level, namespace_='', name_='SineGenerator', fromsubclass_=False, pretty_print=True):
super(SineGenerator, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='SineGenerator'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.delay is not None and 'delay' not in already_processed:
already_processed.add('delay')
showIndent(outfile, level)
outfile.write('delay="%s",\n' % (self.delay,))
if self.phase is not None and 'phase' not in already_processed:
already_processed.add('phase')
showIndent(outfile, level)
outfile.write('phase="%s",\n' % (self.phase,))
if self.duration is not None and 'duration' not in already_processed:
already_processed.add('duration')
showIndent(outfile, level)
outfile.write('duration="%s",\n' % (self.duration,))
if self.period is not None and 'period' not in already_processed:
already_processed.add('period')
showIndent(outfile, level)
outfile.write('period="%s",\n' % (self.period,))
if self.amplitude is not None and 'amplitude' not in already_processed:
already_processed.add('amplitude')
showIndent(outfile, level)
outfile.write('amplitude="%s",\n' % (self.amplitude,))
super(SineGenerator, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(SineGenerator, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('delay', node)
if value is not None and 'delay' not in already_processed:
already_processed.add('delay')
self.delay = value
self.validate_Nml2Quantity_time(self.delay) # validate type Nml2Quantity_time
value = find_attr_value_('phase', node)
if value is not None and 'phase' not in already_processed:
already_processed.add('phase')
self.phase = value
self.validate_Nml2Quantity_none(self.phase) # validate type Nml2Quantity_none
value = find_attr_value_('duration', node)
if value is not None and 'duration' not in already_processed:
already_processed.add('duration')
self.duration = value
self.validate_Nml2Quantity_time(self.duration) # validate type Nml2Quantity_time
value = find_attr_value_('period', node)
if value is not None and 'period' not in already_processed:
already_processed.add('period')
self.period = value
self.validate_Nml2Quantity_time(self.period) # validate type Nml2Quantity_time
value = find_attr_value_('amplitude', node)
if value is not None and 'amplitude' not in already_processed:
already_processed.add('amplitude')
self.amplitude = value
self.validate_Nml2Quantity_current(self.amplitude) # validate type Nml2Quantity_current
super(SineGenerator, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(SineGenerator, self).buildChildren(child_, node, nodeName_, True)
pass
# end class SineGenerator
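# Illustrative usage sketch for SineGenerator (not generated code); the values
# are assumptions. phase is dimensionless (Nml2Quantity_none), delay, duration
# and period are times, amplitude is a current.
#
#     sg = SineGenerator(id='sine0', delay='50ms', phase='0', duration='500ms',
#                        period='25ms', amplitude='0.05nA')
#     sg.export(sys.stdout, 0, name_='sineGenerator')       # needs: import sys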
class PulseGenerator(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, delay=None, duration=None, amplitude=None):
super(PulseGenerator, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.delay = _cast(None, delay)
self.duration = _cast(None, duration)
self.amplitude = _cast(None, amplitude)
pass
def factory(*args_, **kwargs_):
if PulseGenerator.subclass:
return PulseGenerator.subclass(*args_, **kwargs_)
else:
return PulseGenerator(*args_, **kwargs_)
factory = staticmethod(factory)
def get_delay(self): return self.delay
def set_delay(self, delay): self.delay = delay
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_duration(self): return self.duration
def set_duration(self, duration): self.duration = duration
def get_amplitude(self): return self.amplitude
def set_amplitude(self, amplitude): self.amplitude = amplitude
def validate_Nml2Quantity_current(self, value):
# Validate type Nml2Quantity_current, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(PulseGenerator, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='PulseGenerator', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='PulseGenerator')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PulseGenerator'):
super(PulseGenerator, self).exportAttributes(outfile, level, already_processed, namespace_, name_='PulseGenerator')
if self.delay is not None and 'delay' not in already_processed:
already_processed.add('delay')
outfile.write(' delay=%s' % (quote_attrib(self.delay), ))
if self.duration is not None and 'duration' not in already_processed:
already_processed.add('duration')
outfile.write(' duration=%s' % (quote_attrib(self.duration), ))
if self.amplitude is not None and 'amplitude' not in already_processed:
already_processed.add('amplitude')
outfile.write(' amplitude=%s' % (quote_attrib(self.amplitude), ))
def exportChildren(self, outfile, level, namespace_='', name_='PulseGenerator', fromsubclass_=False, pretty_print=True):
super(PulseGenerator, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='PulseGenerator'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.delay is not None and 'delay' not in already_processed:
already_processed.add('delay')
showIndent(outfile, level)
outfile.write('delay="%s",\n' % (self.delay,))
if self.duration is not None and 'duration' not in already_processed:
already_processed.add('duration')
showIndent(outfile, level)
outfile.write('duration="%s",\n' % (self.duration,))
if self.amplitude is not None and 'amplitude' not in already_processed:
already_processed.add('amplitude')
showIndent(outfile, level)
outfile.write('amplitude="%s",\n' % (self.amplitude,))
super(PulseGenerator, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(PulseGenerator, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('delay', node)
if value is not None and 'delay' not in already_processed:
already_processed.add('delay')
self.delay = value
self.validate_Nml2Quantity_time(self.delay) # validate type Nml2Quantity_time
value = find_attr_value_('duration', node)
if value is not None and 'duration' not in already_processed:
already_processed.add('duration')
self.duration = value
self.validate_Nml2Quantity_time(self.duration) # validate type Nml2Quantity_time
value = find_attr_value_('amplitude', node)
if value is not None and 'amplitude' not in already_processed:
already_processed.add('amplitude')
self.amplitude = value
self.validate_Nml2Quantity_current(self.amplitude) # validate type Nml2Quantity_current
super(PulseGenerator, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(PulseGenerator, self).buildChildren(child_, node, nodeName_, True)
pass
# end class PulseGenerator
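# Illustrative usage sketch for PulseGenerator (not generated code); the id and
# quantity strings are assumptions.
#
#     pg = PulseGenerator(id='pulse0', delay='100ms', duration='500ms',
#                         amplitude='0.08nA')
#     pg.export(sys.stdout, 0, name_='pulseGenerator')      # needs: import sys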
class ReactionScheme(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, source=None, type_=None, anytypeobjs_=None):
super(ReactionScheme, self).__init__(id, neuroLexId, )
self.source = _cast(None, source)
self.type_ = _cast(None, type_)
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if ReactionScheme.subclass:
return ReactionScheme.subclass(*args_, **kwargs_)
else:
return ReactionScheme(*args_, **kwargs_)
factory = staticmethod(factory)
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
def get_source(self): return self.source
def set_source(self, source): self.source = source
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def hasContent_(self):
if (
self.anytypeobjs_ or
super(ReactionScheme, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ReactionScheme', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ReactionScheme')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReactionScheme'):
super(ReactionScheme, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ReactionScheme')
if self.source is not None and 'source' not in already_processed:
already_processed.add('source')
outfile.write(' source=%s' % (self.gds_format_string(quote_attrib(self.source).encode(ExternalEncoding), input_name='source'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
def exportChildren(self, outfile, level, namespace_='', name_='ReactionScheme', fromsubclass_=False, pretty_print=True):
super(ReactionScheme, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for obj_ in self.anytypeobjs_:
obj_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ReactionScheme'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.source is not None and 'source' not in already_processed:
already_processed.add('source')
showIndent(outfile, level)
outfile.write('source="%s",\n' % (self.source,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
super(ReactionScheme, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ReactionScheme, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('anytypeobjs_=[\n')
level += 1
for anytypeobjs_ in self.anytypeobjs_:
anytypeobjs_.exportLiteral(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('source', node)
if value is not None and 'source' not in already_processed:
already_processed.add('source')
self.source = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
super(ReactionScheme, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
obj_ = self.gds_build_any(child_, 'ReactionScheme')
if obj_ is not None:
self.add_anytypeobjs_(obj_)
super(ReactionScheme, self).buildChildren(child_, node, nodeName_, True)
# end class ReactionScheme
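# Illustrative usage sketch for ReactionScheme (not generated code). The source
# and type_ values below are hypothetical free-form strings; arbitrary child
# elements parsed via gds_build_any are collected in anytypeobjs_.
#
#     rs = ReactionScheme(id='rxnScheme0', source='scheme.xml', type_='rxd')
#     rs.export(sys.stdout, 0, name_='reactionScheme')      # needs: import sys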
class ExtracellularProperties(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, temperature=None, species=None):
super(ExtracellularProperties, self).__init__(id, neuroLexId, )
self.temperature = _cast(None, temperature)
if species is None:
self.species = []
else:
self.species = species
def factory(*args_, **kwargs_):
if ExtracellularProperties.subclass:
return ExtracellularProperties.subclass(*args_, **kwargs_)
else:
return ExtracellularProperties(*args_, **kwargs_)
factory = staticmethod(factory)
def get_species(self): return self.species
def set_species(self, species): self.species = species
def add_species(self, value): self.species.append(value)
def insert_species(self, index, value): self.species[index] = value
def get_temperature(self): return self.temperature
def set_temperature(self, temperature): self.temperature = temperature
def validate_Nml2Quantity_temperature(self, value):
# Validate type Nml2Quantity_temperature, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.species or
super(ExtracellularProperties, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ExtracellularProperties', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ExtracellularProperties')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExtracellularProperties'):
super(ExtracellularProperties, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ExtracellularProperties')
if self.temperature is not None and 'temperature' not in already_processed:
already_processed.add('temperature')
outfile.write(' temperature=%s' % (quote_attrib(self.temperature), ))
def exportChildren(self, outfile, level, namespace_='', name_='ExtracellularProperties', fromsubclass_=False, pretty_print=True):
super(ExtracellularProperties, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for species_ in self.species:
species_.export(outfile, level, namespace_, name_='species', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ExtracellularProperties'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.temperature is not None and 'temperature' not in already_processed:
already_processed.add('temperature')
showIndent(outfile, level)
outfile.write('temperature="%s",\n' % (self.temperature,))
super(ExtracellularProperties, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ExtracellularProperties, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('species=[\n')
level += 1
for species_ in self.species:
showIndent(outfile, level)
outfile.write('model_.Species(\n')
species_.exportLiteral(outfile, level, name_='Species')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('temperature', node)
if value is not None and 'temperature' not in already_processed:
already_processed.add('temperature')
self.temperature = value
self.validate_Nml2Quantity_temperature(self.temperature) # validate type Nml2Quantity_temperature
super(ExtracellularProperties, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'species':
obj_ = Species.factory()
obj_.build(child_)
self.species.append(obj_)
super(ExtracellularProperties, self).buildChildren(child_, node, nodeName_, True)
# end class ExtracellularProperties
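# Illustrative usage sketch for ExtracellularProperties (not generated code).
# temperature is an Nml2Quantity_temperature string (value assumed); the
# Species instance is assumed to have been built elsewhere with this module's
# Species class.
#
#     ep = ExtracellularProperties(id='extracellular', temperature='37degC')
#     ep.add_species(some_species)        # some_species: a Species instance
#     ep.export(sys.stdout, 0, name_='extracellularProperties')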
class ChannelDensity(Base):
"""Specifying the ion here again is redundant, this will be set in
ionChannel. It is added here TEMPORARILY as selecting all ca or
na conducting channel populations/densities in a cell would be
difficult otherwise. It should be removed in the longer term,
due to possible inconsistencies in this value and that in the
ionChannel element. TODO: remove."""
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, segmentGroup='all', ion=None, ionChannel=None, erev=None, condDensity=None, segment=None, variableParameter=None):
super(ChannelDensity, self).__init__(id, neuroLexId, )
self.segmentGroup = _cast(None, segmentGroup)
self.ion = _cast(None, ion)
self.ionChannel = _cast(None, ionChannel)
self.erev = _cast(None, erev)
self.condDensity = _cast(None, condDensity)
self.segment = _cast(None, segment)
if variableParameter is None:
self.variableParameter = []
else:
self.variableParameter = variableParameter
def factory(*args_, **kwargs_):
if ChannelDensity.subclass:
return ChannelDensity.subclass(*args_, **kwargs_)
else:
return ChannelDensity(*args_, **kwargs_)
factory = staticmethod(factory)
def get_variableParameter(self): return self.variableParameter
def set_variableParameter(self, variableParameter): self.variableParameter = variableParameter
def add_variableParameter(self, value): self.variableParameter.append(value)
def insert_variableParameter(self, index, value): self.variableParameter[index] = value
def get_segmentGroup(self): return self.segmentGroup
def set_segmentGroup(self, segmentGroup): self.segmentGroup = segmentGroup
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_ion(self): return self.ion
def set_ion(self, ion): self.ion = ion
def get_ionChannel(self): return self.ionChannel
def set_ionChannel(self, ionChannel): self.ionChannel = ionChannel
def get_erev(self): return self.erev
def set_erev(self, erev): self.erev = erev
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def get_condDensity(self): return self.condDensity
def set_condDensity(self, condDensity): self.condDensity = condDensity
def validate_Nml2Quantity_conductanceDensity(self, value):
# Validate type Nml2Quantity_conductanceDensity, a restriction on xs:string.
pass
def get_segment(self): return self.segment
def set_segment(self, segment): self.segment = segment
def hasContent_(self):
if (
self.variableParameter or
super(ChannelDensity, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ChannelDensity', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ChannelDensity')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ChannelDensity'):
super(ChannelDensity, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ChannelDensity')
if self.segmentGroup is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
outfile.write(' segmentGroup=%s' % (quote_attrib(self.segmentGroup), ))
if self.ion is not None and 'ion' not in already_processed:
already_processed.add('ion')
outfile.write(' ion=%s' % (quote_attrib(self.ion), ))
if self.ionChannel is not None and 'ionChannel' not in already_processed:
already_processed.add('ionChannel')
outfile.write(' ionChannel=%s' % (quote_attrib(self.ionChannel), ))
if self.erev is not None and 'erev' not in already_processed:
already_processed.add('erev')
outfile.write(' erev=%s' % (quote_attrib(self.erev), ))
if self.condDensity is not None and 'condDensity' not in already_processed:
already_processed.add('condDensity')
outfile.write(' condDensity=%s' % (quote_attrib(self.condDensity), ))
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
outfile.write(' segment=%s' % (quote_attrib(self.segment), ))
def exportChildren(self, outfile, level, namespace_='', name_='ChannelDensity', fromsubclass_=False, pretty_print=True):
super(ChannelDensity, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for variableParameter_ in self.variableParameter:
variableParameter_.export(outfile, level, namespace_, name_='variableParameter', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ChannelDensity'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.segmentGroup is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
showIndent(outfile, level)
outfile.write('segmentGroup="%s",\n' % (self.segmentGroup,))
if self.ion is not None and 'ion' not in already_processed:
already_processed.add('ion')
showIndent(outfile, level)
outfile.write('ion="%s",\n' % (self.ion,))
if self.ionChannel is not None and 'ionChannel' not in already_processed:
already_processed.add('ionChannel')
showIndent(outfile, level)
outfile.write('ionChannel="%s",\n' % (self.ionChannel,))
if self.erev is not None and 'erev' not in already_processed:
already_processed.add('erev')
showIndent(outfile, level)
outfile.write('erev="%s",\n' % (self.erev,))
if self.condDensity is not None and 'condDensity' not in already_processed:
already_processed.add('condDensity')
showIndent(outfile, level)
outfile.write('condDensity="%s",\n' % (self.condDensity,))
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
showIndent(outfile, level)
outfile.write('segment="%s",\n' % (self.segment,))
super(ChannelDensity, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ChannelDensity, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('variableParameter=[\n')
level += 1
for variableParameter_ in self.variableParameter:
showIndent(outfile, level)
outfile.write('model_.VariableParameter(\n')
variableParameter_.exportLiteral(outfile, level, name_='VariableParameter')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('segmentGroup', node)
if value is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
self.segmentGroup = value
self.validate_NmlId(self.segmentGroup) # validate type NmlId
value = find_attr_value_('ion', node)
if value is not None and 'ion' not in already_processed:
already_processed.add('ion')
self.ion = value
self.validate_NmlId(self.ion) # validate type NmlId
value = find_attr_value_('ionChannel', node)
if value is not None and 'ionChannel' not in already_processed:
already_processed.add('ionChannel')
self.ionChannel = value
self.validate_NmlId(self.ionChannel) # validate type NmlId
value = find_attr_value_('erev', node)
if value is not None and 'erev' not in already_processed:
already_processed.add('erev')
self.erev = value
self.validate_Nml2Quantity_voltage(self.erev) # validate type Nml2Quantity_voltage
value = find_attr_value_('condDensity', node)
if value is not None and 'condDensity' not in already_processed:
already_processed.add('condDensity')
self.condDensity = value
self.validate_Nml2Quantity_conductanceDensity(self.condDensity) # validate type Nml2Quantity_conductanceDensity
value = find_attr_value_('segment', node)
if value is not None and 'segment' not in already_processed:
already_processed.add('segment')
self.segment = value
self.validate_NmlId(self.segment) # validate type NmlId
super(ChannelDensity, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'variableParameter':
obj_ = VariableParameter.factory()
obj_.build(child_)
self.variableParameter.append(obj_)
super(ChannelDensity, self).buildChildren(child_, node, nodeName_, True)
# end class ChannelDensity
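# Illustrative usage sketch for ChannelDensity (not generated code). The ids,
# the ionChannel reference and the quantity strings are assumptions; erev is a
# voltage and condDensity a conductance density in NeuroML 2 string form.
#
#     cd = ChannelDensity(id='naDensity', ionChannel='NaConductance', ion='na',
#                         erev='50mV', condDensity='120 mS_per_cm2',
#                         segmentGroup='all')
#     cd.export(sys.stdout, 0, name_='channelDensity')      # needs: import sys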
class ChannelPopulation(Base):
"""Specifying the ion here again is redundant, this will be set in
ionChannel. It is added here TEMPORARILY as selecting all ca or
na conducting channel populations/densities in a cell would be
difficult otherwise. It should be removed in the longer term,
due to possible inconsistencies in this value and that in the
ionChannel element. TODO: remove."""
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, segmentGroup='all', ion=None, number=None, ionChannel=None, erev=None, segment=None, variableParameter=None):
super(ChannelPopulation, self).__init__(id, neuroLexId, )
self.segmentGroup = _cast(None, segmentGroup)
self.ion = _cast(None, ion)
self.number = _cast(int, number)
self.ionChannel = _cast(None, ionChannel)
self.erev = _cast(None, erev)
self.segment = _cast(None, segment)
if variableParameter is None:
self.variableParameter = []
else:
self.variableParameter = variableParameter
def factory(*args_, **kwargs_):
if ChannelPopulation.subclass:
return ChannelPopulation.subclass(*args_, **kwargs_)
else:
return ChannelPopulation(*args_, **kwargs_)
factory = staticmethod(factory)
def get_variableParameter(self): return self.variableParameter
def set_variableParameter(self, variableParameter): self.variableParameter = variableParameter
def add_variableParameter(self, value): self.variableParameter.append(value)
def insert_variableParameter(self, index, value): self.variableParameter[index] = value
def get_segmentGroup(self): return self.segmentGroup
def set_segmentGroup(self, segmentGroup): self.segmentGroup = segmentGroup
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_ion(self): return self.ion
def set_ion(self, ion): self.ion = ion
def get_number(self): return self.number
def set_number(self, number): self.number = number
def get_ionChannel(self): return self.ionChannel
def set_ionChannel(self, ionChannel): self.ionChannel = ionChannel
def get_erev(self): return self.erev
def set_erev(self, erev): self.erev = erev
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def get_segment(self): return self.segment
def set_segment(self, segment): self.segment = segment
def hasContent_(self):
if (
self.variableParameter or
super(ChannelPopulation, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ChannelPopulation', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ChannelPopulation')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ChannelPopulation'):
super(ChannelPopulation, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ChannelPopulation')
if self.segmentGroup is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
outfile.write(' segmentGroup=%s' % (quote_attrib(self.segmentGroup), ))
if self.ion is not None and 'ion' not in already_processed:
already_processed.add('ion')
outfile.write(' ion=%s' % (quote_attrib(self.ion), ))
if self.number is not None and 'number' not in already_processed:
already_processed.add('number')
outfile.write(' number="%s"' % self.gds_format_integer(self.number, input_name='number'))
if self.ionChannel is not None and 'ionChannel' not in already_processed:
already_processed.add('ionChannel')
outfile.write(' ionChannel=%s' % (quote_attrib(self.ionChannel), ))
if self.erev is not None and 'erev' not in already_processed:
already_processed.add('erev')
outfile.write(' erev=%s' % (quote_attrib(self.erev), ))
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
outfile.write(' segment=%s' % (quote_attrib(self.segment), ))
def exportChildren(self, outfile, level, namespace_='', name_='ChannelPopulation', fromsubclass_=False, pretty_print=True):
super(ChannelPopulation, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for variableParameter_ in self.variableParameter:
variableParameter_.export(outfile, level, namespace_, name_='variableParameter', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ChannelPopulation'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.segmentGroup is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
showIndent(outfile, level)
outfile.write('segmentGroup="%s",\n' % (self.segmentGroup,))
if self.ion is not None and 'ion' not in already_processed:
already_processed.add('ion')
showIndent(outfile, level)
outfile.write('ion="%s",\n' % (self.ion,))
if self.number is not None and 'number' not in already_processed:
already_processed.add('number')
showIndent(outfile, level)
outfile.write('number=%d,\n' % (self.number,))
if self.ionChannel is not None and 'ionChannel' not in already_processed:
already_processed.add('ionChannel')
showIndent(outfile, level)
outfile.write('ionChannel="%s",\n' % (self.ionChannel,))
if self.erev is not None and 'erev' not in already_processed:
already_processed.add('erev')
showIndent(outfile, level)
outfile.write('erev="%s",\n' % (self.erev,))
if self.segment is not None and 'segment' not in already_processed:
already_processed.add('segment')
showIndent(outfile, level)
outfile.write('segment="%s",\n' % (self.segment,))
super(ChannelPopulation, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ChannelPopulation, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('variableParameter=[\n')
level += 1
for variableParameter_ in self.variableParameter:
showIndent(outfile, level)
outfile.write('model_.VariableParameter(\n')
variableParameter_.exportLiteral(outfile, level, name_='VariableParameter')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('segmentGroup', node)
if value is not None and 'segmentGroup' not in already_processed:
already_processed.add('segmentGroup')
self.segmentGroup = value
self.validate_NmlId(self.segmentGroup) # validate type NmlId
value = find_attr_value_('ion', node)
if value is not None and 'ion' not in already_processed:
already_processed.add('ion')
self.ion = value
self.validate_NmlId(self.ion) # validate type NmlId
value = find_attr_value_('number', node)
if value is not None and 'number' not in already_processed:
already_processed.add('number')
try:
self.number = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.number < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('ionChannel', node)
if value is not None and 'ionChannel' not in already_processed:
already_processed.add('ionChannel')
self.ionChannel = value
self.validate_NmlId(self.ionChannel) # validate type NmlId
value = find_attr_value_('erev', node)
if value is not None and 'erev' not in already_processed:
already_processed.add('erev')
self.erev = value
self.validate_Nml2Quantity_voltage(self.erev) # validate type Nml2Quantity_voltage
value = find_attr_value_('segment', node)
if value is not None and 'segment' not in already_processed:
already_processed.add('segment')
self.segment = value
self.validate_NmlId(self.segment) # validate type NmlId
super(ChannelPopulation, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'variableParameter':
obj_ = VariableParameter.factory()
obj_.build(child_)
self.variableParameter.append(obj_)
super(ChannelPopulation, self).buildChildren(child_, node, nodeName_, True)
# end class ChannelPopulation
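# Illustrative usage sketch for ChannelPopulation (not generated code). number
# is an integer count; the other values are hypothetical ids and assumed
# NeuroML 2 quantity strings.
#
#     cp = ChannelPopulation(id='naChans', ionChannel='NaConductance', ion='na',
#                            number=1000, erev='50mV', segment='0')
#     cp.export(sys.stdout, 0, name_='channelPopulation')   # needs: import sys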
class BiophysicalProperties(Standalone):
"""Standalone element which is usually inside a single cell, but could
be outside and referenced by id."""
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, membraneProperties=None, intracellularProperties=None, extracellularProperties=None):
super(BiophysicalProperties, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.membraneProperties = membraneProperties
self.intracellularProperties = intracellularProperties
self.extracellularProperties = extracellularProperties
def factory(*args_, **kwargs_):
if BiophysicalProperties.subclass:
return BiophysicalProperties.subclass(*args_, **kwargs_)
else:
return BiophysicalProperties(*args_, **kwargs_)
factory = staticmethod(factory)
def get_membraneProperties(self): return self.membraneProperties
def set_membraneProperties(self, membraneProperties): self.membraneProperties = membraneProperties
def get_intracellularProperties(self): return self.intracellularProperties
def set_intracellularProperties(self, intracellularProperties): self.intracellularProperties = intracellularProperties
def get_extracellularProperties(self): return self.extracellularProperties
def set_extracellularProperties(self, extracellularProperties): self.extracellularProperties = extracellularProperties
def hasContent_(self):
if (
self.membraneProperties is not None or
self.intracellularProperties is not None or
self.extracellularProperties is not None or
super(BiophysicalProperties, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='BiophysicalProperties', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='BiophysicalProperties')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BiophysicalProperties'):
super(BiophysicalProperties, self).exportAttributes(outfile, level, already_processed, namespace_, name_='BiophysicalProperties')
def exportChildren(self, outfile, level, namespace_='', name_='BiophysicalProperties', fromsubclass_=False, pretty_print=True):
super(BiophysicalProperties, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.membraneProperties is not None:
self.membraneProperties.export(outfile, level, namespace_, name_='membraneProperties', pretty_print=pretty_print)
if self.intracellularProperties is not None:
self.intracellularProperties.export(outfile, level, namespace_, name_='intracellularProperties', pretty_print=pretty_print)
if self.extracellularProperties is not None:
self.extracellularProperties.export(outfile, level, namespace_, name_='extracellularProperties', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='BiophysicalProperties'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(BiophysicalProperties, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(BiophysicalProperties, self).exportLiteralChildren(outfile, level, name_)
if self.membraneProperties is not None:
showIndent(outfile, level)
outfile.write('membraneProperties=model_.MembraneProperties(\n')
self.membraneProperties.exportLiteral(outfile, level, name_='membraneProperties')
showIndent(outfile, level)
outfile.write('),\n')
if self.intracellularProperties is not None:
showIndent(outfile, level)
outfile.write('intracellularProperties=model_.IntracellularProperties(\n')
self.intracellularProperties.exportLiteral(outfile, level, name_='intracellularProperties')
showIndent(outfile, level)
outfile.write('),\n')
if self.extracellularProperties is not None:
showIndent(outfile, level)
outfile.write('extracellularProperties=model_.ExtracellularProperties(\n')
self.extracellularProperties.exportLiteral(outfile, level, name_='extracellularProperties')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(BiophysicalProperties, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'membraneProperties':
obj_ = MembraneProperties.factory()
obj_.build(child_)
self.set_membraneProperties(obj_)
elif nodeName_ == 'intracellularProperties':
obj_ = IntracellularProperties.factory()
obj_.build(child_)
self.set_intracellularProperties(obj_)
elif nodeName_ == 'extracellularProperties':
obj_ = ExtracellularProperties.factory()
obj_.build(child_)
self.set_extracellularProperties(obj_)
super(BiophysicalProperties, self).buildChildren(child_, node, nodeName_, True)
# end class BiophysicalProperties
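# Illustrative usage sketch for BiophysicalProperties (not generated code). The
# membrane/intracellular property objects are assumed to have been built
# elsewhere with this module's MembraneProperties / IntracellularProperties
# classes; only the ExtracellularProperties line uses a class shown above.
#
#     bp = BiophysicalProperties(id='biophys0')
#     bp.set_membraneProperties(mp)        # mp: a MembraneProperties instance
#     bp.set_intracellularProperties(ip)   # ip: an IntracellularProperties instance
#     bp.set_extracellularProperties(
#         ExtracellularProperties(id='ec0', temperature='37degC'))
#     bp.export(sys.stdout, 0, name_='biophysicalProperties')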
class InhomogeneousParam(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, variable=None, metric=None, proximal=None, distal=None):
super(InhomogeneousParam, self).__init__(id, neuroLexId, )
self.variable = _cast(None, variable)
self.metric = _cast(None, metric)
self.proximal = proximal
self.distal = distal
def factory(*args_, **kwargs_):
if InhomogeneousParam.subclass:
return InhomogeneousParam.subclass(*args_, **kwargs_)
else:
return InhomogeneousParam(*args_, **kwargs_)
factory = staticmethod(factory)
def get_proximal(self): return self.proximal
def set_proximal(self, proximal): self.proximal = proximal
def get_distal(self): return self.distal
def set_distal(self, distal): self.distal = distal
def get_variable(self): return self.variable
def set_variable(self, variable): self.variable = variable
def get_metric(self): return self.metric
def set_metric(self, metric): self.metric = metric
def validate_Metric(self, value):
# Validate type Metric, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.proximal is not None or
self.distal is not None or
super(InhomogeneousParam, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='InhomogeneousParam', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='InhomogeneousParam')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InhomogeneousParam'):
super(InhomogeneousParam, self).exportAttributes(outfile, level, already_processed, namespace_, name_='InhomogeneousParam')
if self.variable is not None and 'variable' not in already_processed:
already_processed.add('variable')
outfile.write(' variable=%s' % (self.gds_format_string(quote_attrib(self.variable).encode(ExternalEncoding), input_name='variable'), ))
if self.metric is not None and 'metric' not in already_processed:
already_processed.add('metric')
outfile.write(' metric=%s' % (quote_attrib(self.metric), ))
def exportChildren(self, outfile, level, namespace_='', name_='InhomogeneousParam', fromsubclass_=False, pretty_print=True):
super(InhomogeneousParam, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.proximal is not None:
self.proximal.export(outfile, level, namespace_, name_='proximal', pretty_print=pretty_print)
if self.distal is not None:
self.distal.export(outfile, level, namespace_, name_='distal', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='InhomogeneousParam'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.variable is not None and 'variable' not in already_processed:
already_processed.add('variable')
showIndent(outfile, level)
outfile.write('variable="%s",\n' % (self.variable,))
if self.metric is not None and 'metric' not in already_processed:
already_processed.add('metric')
showIndent(outfile, level)
outfile.write('metric="%s",\n' % (self.metric,))
super(InhomogeneousParam, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(InhomogeneousParam, self).exportLiteralChildren(outfile, level, name_)
if self.proximal is not None:
showIndent(outfile, level)
outfile.write('proximal=model_.ProximalDetails(\n')
self.proximal.exportLiteral(outfile, level, name_='proximal')
showIndent(outfile, level)
outfile.write('),\n')
if self.distal is not None:
showIndent(outfile, level)
outfile.write('distal=model_.DistalDetails(\n')
self.distal.exportLiteral(outfile, level, name_='distal')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('variable', node)
if value is not None and 'variable' not in already_processed:
already_processed.add('variable')
self.variable = value
value = find_attr_value_('metric', node)
if value is not None and 'metric' not in already_processed:
already_processed.add('metric')
self.metric = value
self.validate_Metric(self.metric) # validate type Metric
super(InhomogeneousParam, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'proximal':
obj_ = ProximalDetails.factory()
obj_.build(child_)
self.set_proximal(obj_)
elif nodeName_ == 'distal':
obj_ = DistalDetails.factory()
obj_.build(child_)
self.set_distal(obj_)
super(InhomogeneousParam, self).buildChildren(child_, node, nodeName_, True)
# end class InhomogeneousParam
class SegmentGroup(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, member=None, include=None, path=None, subTree=None, inhomogeneousParam=None):
super(SegmentGroup, self).__init__(id, neuroLexId, )
if member is None:
self.member = []
else:
self.member = member
if include is None:
self.include = []
else:
self.include = include
if path is None:
self.path = []
else:
self.path = path
if subTree is None:
self.subTree = []
else:
self.subTree = subTree
if inhomogeneousParam is None:
self.inhomogeneousParam = []
else:
self.inhomogeneousParam = inhomogeneousParam
def factory(*args_, **kwargs_):
if SegmentGroup.subclass:
return SegmentGroup.subclass(*args_, **kwargs_)
else:
return SegmentGroup(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_include(self): return self.include
def set_include(self, include): self.include = include
def add_include(self, value): self.include.append(value)
def insert_include(self, index, value): self.include[index] = value
def get_path(self): return self.path
def set_path(self, path): self.path = path
def add_path(self, value): self.path.append(value)
def insert_path(self, index, value): self.path[index] = value
def get_subTree(self): return self.subTree
def set_subTree(self, subTree): self.subTree = subTree
def add_subTree(self, value): self.subTree.append(value)
def insert_subTree(self, index, value): self.subTree[index] = value
def get_inhomogeneousParam(self): return self.inhomogeneousParam
def set_inhomogeneousParam(self, inhomogeneousParam): self.inhomogeneousParam = inhomogeneousParam
def add_inhomogeneousParam(self, value): self.inhomogeneousParam.append(value)
def insert_inhomogeneousParam(self, index, value): self.inhomogeneousParam[index] = value
def hasContent_(self):
if (
self.member or
self.include or
self.path or
self.subTree or
self.inhomogeneousParam or
super(SegmentGroup, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SegmentGroup', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SegmentGroup')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SegmentGroup'):
super(SegmentGroup, self).exportAttributes(outfile, level, already_processed, namespace_, name_='SegmentGroup')
def exportChildren(self, outfile, level, namespace_='', name_='SegmentGroup', fromsubclass_=False, pretty_print=True):
super(SegmentGroup, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member', pretty_print=pretty_print)
for include_ in self.include:
include_.export(outfile, level, namespace_, name_='include', pretty_print=pretty_print)
for path_ in self.path:
path_.export(outfile, level, namespace_, name_='path', pretty_print=pretty_print)
for subTree_ in self.subTree:
subTree_.export(outfile, level, namespace_, name_='subTree', pretty_print=pretty_print)
for inhomogeneousParam_ in self.inhomogeneousParam:
inhomogeneousParam_.export(outfile, level, namespace_, name_='inhomogeneousParam', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='SegmentGroup'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(SegmentGroup, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(SegmentGroup, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member_ in self.member:
showIndent(outfile, level)
outfile.write('model_.Member(\n')
member_.exportLiteral(outfile, level, name_='Member')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('include=[\n')
level += 1
for include_ in self.include:
showIndent(outfile, level)
outfile.write('model_.Include(\n')
include_.exportLiteral(outfile, level, name_='Include')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('path=[\n')
level += 1
for path_ in self.path:
showIndent(outfile, level)
outfile.write('model_.Path(\n')
path_.exportLiteral(outfile, level, name_='Path')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('subTree=[\n')
level += 1
for subTree_ in self.subTree:
showIndent(outfile, level)
outfile.write('model_.SubTree(\n')
subTree_.exportLiteral(outfile, level, name_='SubTree')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('inhomogeneousParam=[\n')
level += 1
for inhomogeneousParam_ in self.inhomogeneousParam:
showIndent(outfile, level)
outfile.write('model_.InhomogeneousParam(\n')
inhomogeneousParam_.exportLiteral(outfile, level, name_='InhomogeneousParam')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(SegmentGroup, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'member':
obj_ = Member.factory()
obj_.build(child_)
self.member.append(obj_)
elif nodeName_ == 'include':
obj_ = Include.factory()
obj_.build(child_)
self.include.append(obj_)
elif nodeName_ == 'path':
obj_ = Path.factory()
obj_.build(child_)
self.path.append(obj_)
elif nodeName_ == 'subTree':
obj_ = SubTree.factory()
obj_.build(child_)
self.subTree.append(obj_)
elif nodeName_ == 'inhomogeneousParam':
obj_ = InhomogeneousParam.factory()
obj_.build(child_)
self.inhomogeneousParam.append(obj_)
super(SegmentGroup, self).buildChildren(child_, node, nodeName_, True)
# end class SegmentGroup
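# A minimal usage sketch (comment only, not part of the generated bindings): grouping
# segments. Member and Include are built via their factories here; the exact attribute
# names for the referenced segment/group ids live on the generated Member and Include
# classes and are not assumed.
#
#   grp = SegmentGroup(id='soma_group')
#   grp.add_member(Member.factory())      # then set the segment reference on the member
#   grp.add_include(Include.factory())    # then set the segmentGroup reference on the include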
class Segment(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, name=None, parent=None, proximal=None, distal=None):
super(Segment, self).__init__(id, neuroLexId, )
self.name = _cast(None, name)
self.parent = parent
self.proximal = proximal
self.distal = distal
def factory(*args_, **kwargs_):
if Segment.subclass:
return Segment.subclass(*args_, **kwargs_)
else:
return Segment(*args_, **kwargs_)
factory = staticmethod(factory)
def get_parent(self): return self.parent
def set_parent(self, parent): self.parent = parent
def get_proximal(self): return self.proximal
def set_proximal(self, proximal): self.proximal = proximal
def get_distal(self): return self.distal
def set_distal(self, distal): self.distal = distal
def get_name(self): return self.name
def set_name(self, name): self.name = name
def hasContent_(self):
if (
self.parent is not None or
self.proximal is not None or
self.distal is not None or
super(Segment, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Segment', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Segment')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Segment'):
super(Segment, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Segment')
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='Segment', fromsubclass_=False, pretty_print=True):
super(Segment, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.parent is not None:
self.parent.export(outfile, level, namespace_, name_='parent', pretty_print=pretty_print)
if self.proximal is not None:
self.proximal.export(outfile, level, namespace_, name_='proximal', pretty_print=pretty_print)
if self.distal is not None:
self.distal.export(outfile, level, namespace_, name_='distal', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Segment'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
super(Segment, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Segment, self).exportLiteralChildren(outfile, level, name_)
if self.parent is not None:
showIndent(outfile, level)
outfile.write('parent=model_.SegmentParent(\n')
self.parent.exportLiteral(outfile, level, name_='parent')
showIndent(outfile, level)
outfile.write('),\n')
if self.proximal is not None:
showIndent(outfile, level)
outfile.write('proximal=model_.Point3DWithDiam(\n')
self.proximal.exportLiteral(outfile, level, name_='proximal')
showIndent(outfile, level)
outfile.write('),\n')
if self.distal is not None:
showIndent(outfile, level)
outfile.write('distal=model_.Point3DWithDiam(\n')
self.distal.exportLiteral(outfile, level, name_='distal')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
super(Segment, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'parent':
obj_ = SegmentParent.factory()
obj_.build(child_)
self.set_parent(obj_)
elif nodeName_ == 'proximal':
obj_ = Point3DWithDiam.factory()
obj_.build(child_)
self.set_proximal(obj_)
elif nodeName_ == 'distal':
obj_ = Point3DWithDiam.factory()
obj_.build(child_)
self.set_distal(obj_)
super(Segment, self).buildChildren(child_, node, nodeName_, True)
# end class Segment
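# A minimal usage sketch (comment only, not part of the generated bindings): building a
# Segment. Point3DWithDiam is assumed to carry x/y/z/diameter attributes per the NeuroML
# schema; the coordinates would be set on the factory products before export.
#
#   soma = Segment(id=0, name='soma')
#   soma.set_proximal(Point3DWithDiam.factory())
#   soma.set_distal(Point3DWithDiam.factory())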
class Morphology(Standalone):
"""Standalone element which is usually inside a single cell, but could
be outside and referenced by id."""
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, segment=None, segmentGroup=None):
super(Morphology, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
if segment is None:
self.segment = []
else:
self.segment = segment
if segmentGroup is None:
self.segmentGroup = []
else:
self.segmentGroup = segmentGroup
def factory(*args_, **kwargs_):
if Morphology.subclass:
return Morphology.subclass(*args_, **kwargs_)
else:
return Morphology(*args_, **kwargs_)
factory = staticmethod(factory)
def get_segment(self): return self.segment
def set_segment(self, segment): self.segment = segment
def add_segment(self, value): self.segment.append(value)
def insert_segment(self, index, value): self.segment[index] = value
def get_segmentGroup(self): return self.segmentGroup
def set_segmentGroup(self, segmentGroup): self.segmentGroup = segmentGroup
def add_segmentGroup(self, value): self.segmentGroup.append(value)
def insert_segmentGroup(self, index, value): self.segmentGroup[index] = value
def hasContent_(self):
if (
self.segment or
self.segmentGroup or
super(Morphology, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Morphology', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Morphology')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Morphology'):
super(Morphology, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Morphology')
def exportChildren(self, outfile, level, namespace_='', name_='Morphology', fromsubclass_=False, pretty_print=True):
super(Morphology, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for segment_ in self.segment:
segment_.export(outfile, level, namespace_, name_='segment', pretty_print=pretty_print)
for segmentGroup_ in self.segmentGroup:
segmentGroup_.export(outfile, level, namespace_, name_='segmentGroup', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Morphology'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Morphology, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Morphology, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('segment=[\n')
level += 1
for segment_ in self.segment:
showIndent(outfile, level)
outfile.write('model_.Segment(\n')
segment_.exportLiteral(outfile, level, name_='Segment')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('segmentGroup=[\n')
level += 1
for segmentGroup_ in self.segmentGroup:
showIndent(outfile, level)
outfile.write('model_.SegmentGroup(\n')
segmentGroup_.exportLiteral(outfile, level, name_='SegmentGroup')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(Morphology, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'segment':
obj_ = Segment.factory()
obj_.build(child_)
self.segment.append(obj_)
elif nodeName_ == 'segmentGroup':
obj_ = SegmentGroup.factory()
obj_.build(child_)
self.segmentGroup.append(obj_)
super(Morphology, self).buildChildren(child_, node, nodeName_, True)
# end class Morphology
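# A minimal usage sketch (comment only, not part of the generated bindings): assembling a
# Morphology and serialising it as XML with the generated export() method. sys is assumed
# to be importable at call time.
#
#   morph = Morphology(id='morph1')
#   morph.add_segment(Segment(id=0, name='soma'))
#   morph.export(sys.stdout, 0, name_='morphology', pretty_print=True)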
class BaseCell(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, extensiontype_=None):
super(BaseCell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, extensiontype_, )
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if BaseCell.subclass:
return BaseCell.subclass(*args_, **kwargs_)
else:
return BaseCell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(BaseCell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='BaseCell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='BaseCell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BaseCell'):
super(BaseCell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='BaseCell')
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='BaseCell', fromsubclass_=False, pretty_print=True):
super(BaseCell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='BaseCell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(BaseCell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(BaseCell, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(BaseCell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(BaseCell, self).buildChildren(child_, node, nodeName_, True)
pass
# end class BaseCell
class BaseSynapse(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, extensiontype_=None):
super(BaseSynapse, self).__init__(id, neuroLexId, name, metaid, notes, annotation, extensiontype_, )
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if BaseSynapse.subclass:
return BaseSynapse.subclass(*args_, **kwargs_)
else:
return BaseSynapse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(BaseSynapse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='BaseSynapse', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='BaseSynapse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BaseSynapse'):
super(BaseSynapse, self).exportAttributes(outfile, level, already_processed, namespace_, name_='BaseSynapse')
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='BaseSynapse', fromsubclass_=False, pretty_print=True):
super(BaseSynapse, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='BaseSynapse'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(BaseSynapse, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(BaseSynapse, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(BaseSynapse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(BaseSynapse, self).buildChildren(child_, node, nodeName_, True)
pass
# end class BaseSynapse
class DecayingPoolConcentrationModel(Standalone):
"""Should not be required, as it's present on the species element!"""
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, ion=None, shellThickness=None, restingConc=None, decayConstant=None, extensiontype_=None):
super(DecayingPoolConcentrationModel, self).__init__(id, neuroLexId, name, metaid, notes, annotation, extensiontype_, )
self.ion = _cast(None, ion)
self.shellThickness = _cast(None, shellThickness)
self.restingConc = _cast(None, restingConc)
self.decayConstant = _cast(None, decayConstant)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if DecayingPoolConcentrationModel.subclass:
return DecayingPoolConcentrationModel.subclass(*args_, **kwargs_)
else:
return DecayingPoolConcentrationModel(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ion(self): return self.ion
def set_ion(self, ion): self.ion = ion
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def get_shellThickness(self): return self.shellThickness
def set_shellThickness(self, shellThickness): self.shellThickness = shellThickness
def validate_Nml2Quantity_length(self, value):
# Validate type Nml2Quantity_length, a restriction on xs:string.
pass
def get_restingConc(self): return self.restingConc
def set_restingConc(self, restingConc): self.restingConc = restingConc
def validate_Nml2Quantity_concentration(self, value):
# Validate type Nml2Quantity_concentration, a restriction on xs:string.
pass
def get_decayConstant(self): return self.decayConstant
def set_decayConstant(self, decayConstant): self.decayConstant = decayConstant
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(DecayingPoolConcentrationModel, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='DecayingPoolConcentrationModel', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DecayingPoolConcentrationModel')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DecayingPoolConcentrationModel'):
super(DecayingPoolConcentrationModel, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DecayingPoolConcentrationModel')
if self.ion is not None and 'ion' not in already_processed:
already_processed.add('ion')
outfile.write(' ion=%s' % (quote_attrib(self.ion), ))
if self.shellThickness is not None and 'shellThickness' not in already_processed:
already_processed.add('shellThickness')
outfile.write(' shellThickness=%s' % (quote_attrib(self.shellThickness), ))
if self.restingConc is not None and 'restingConc' not in already_processed:
already_processed.add('restingConc')
outfile.write(' restingConc=%s' % (quote_attrib(self.restingConc), ))
if self.decayConstant is not None and 'decayConstant' not in already_processed:
already_processed.add('decayConstant')
outfile.write(' decayConstant=%s' % (quote_attrib(self.decayConstant), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='DecayingPoolConcentrationModel', fromsubclass_=False, pretty_print=True):
super(DecayingPoolConcentrationModel, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='DecayingPoolConcentrationModel'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.ion is not None and 'ion' not in already_processed:
already_processed.add('ion')
showIndent(outfile, level)
outfile.write('ion="%s",\n' % (self.ion,))
if self.shellThickness is not None and 'shellThickness' not in already_processed:
already_processed.add('shellThickness')
showIndent(outfile, level)
outfile.write('shellThickness="%s",\n' % (self.shellThickness,))
if self.restingConc is not None and 'restingConc' not in already_processed:
already_processed.add('restingConc')
showIndent(outfile, level)
outfile.write('restingConc="%s",\n' % (self.restingConc,))
if self.decayConstant is not None and 'decayConstant' not in already_processed:
already_processed.add('decayConstant')
showIndent(outfile, level)
outfile.write('decayConstant="%s",\n' % (self.decayConstant,))
super(DecayingPoolConcentrationModel, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DecayingPoolConcentrationModel, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('ion', node)
if value is not None and 'ion' not in already_processed:
already_processed.add('ion')
self.ion = value
self.validate_NmlId(self.ion) # validate type NmlId
value = find_attr_value_('shellThickness', node)
if value is not None and 'shellThickness' not in already_processed:
already_processed.add('shellThickness')
self.shellThickness = value
self.validate_Nml2Quantity_length(self.shellThickness) # validate type Nml2Quantity_length
value = find_attr_value_('restingConc', node)
if value is not None and 'restingConc' not in already_processed:
already_processed.add('restingConc')
self.restingConc = value
self.validate_Nml2Quantity_concentration(self.restingConc) # validate type Nml2Quantity_concentration
value = find_attr_value_('decayConstant', node)
if value is not None and 'decayConstant' not in already_processed:
already_processed.add('decayConstant')
self.decayConstant = value
self.validate_Nml2Quantity_time(self.decayConstant) # validate type Nml2Quantity_time
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(DecayingPoolConcentrationModel, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(DecayingPoolConcentrationModel, self).buildChildren(child_, node, nodeName_, True)
pass
# end class DecayingPoolConcentrationModel
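# A minimal usage sketch (comment only, not part of the generated bindings): the quantity
# attributes are plain strings restricted by the Nml2Quantity_* patterns, so each value
# carries its unit; the unit strings below are illustrative assumptions only.
#
#   ca_pool = DecayingPoolConcentrationModel(id='ca_pool', ion='ca',
#                                            restingConc='1e-4 mM',
#                                            decayConstant='15 ms',
#                                            shellThickness='2.4e-2 um')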
class GateHHRatesInf(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, instances=1, type_=None, notes=None, q10Settings=None, forwardRate=None, reverseRate=None, steadyState=None):
super(GateHHRatesInf, self).__init__(id, neuroLexId, )
self.instances = _cast(int, instances)
self.type_ = _cast(None, type_)
self.notes = notes
self.q10Settings = q10Settings
self.forwardRate = forwardRate
self.reverseRate = reverseRate
self.steadyState = steadyState
def factory(*args_, **kwargs_):
if GateHHRatesInf.subclass:
return GateHHRatesInf.subclass(*args_, **kwargs_)
else:
return GateHHRatesInf(*args_, **kwargs_)
factory = staticmethod(factory)
def get_notes(self): return self.notes
def set_notes(self, notes): self.notes = notes
def validate_Notes(self, value):
# Validate type Notes, a restriction on xs:string.
pass
def get_q10Settings(self): return self.q10Settings
def set_q10Settings(self, q10Settings): self.q10Settings = q10Settings
def get_forwardRate(self): return self.forwardRate
def set_forwardRate(self, forwardRate): self.forwardRate = forwardRate
def get_reverseRate(self): return self.reverseRate
def set_reverseRate(self, reverseRate): self.reverseRate = reverseRate
def get_steadyState(self): return self.steadyState
def set_steadyState(self, steadyState): self.steadyState = steadyState
def get_instances(self): return self.instances
def set_instances(self, instances): self.instances = instances
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_gateTypes(self, value):
# Validate type gateTypes, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.notes is not None or
self.q10Settings is not None or
self.forwardRate is not None or
self.reverseRate is not None or
self.steadyState is not None or
super(GateHHRatesInf, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='GateHHRatesInf', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='GateHHRatesInf')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='GateHHRatesInf'):
super(GateHHRatesInf, self).exportAttributes(outfile, level, already_processed, namespace_, name_='GateHHRatesInf')
if self.instances is not None and 'instances' not in already_processed:
already_processed.add('instances')
outfile.write(' instances="%s"' % self.gds_format_integer(self.instances, input_name='instances'))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, outfile, level, namespace_='', name_='GateHHRatesInf', fromsubclass_=False, pretty_print=True):
super(GateHHRatesInf, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.notes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snotes>%s</%snotes>%s' % (namespace_, self.gds_format_string(quote_xml(self.notes).encode(ExternalEncoding), input_name='notes'), namespace_, eol_))
if self.q10Settings is not None:
self.q10Settings.export(outfile, level, namespace_, name_='q10Settings', pretty_print=pretty_print)
if self.forwardRate is not None:
self.forwardRate.export(outfile, level, namespace_, name_='forwardRate', pretty_print=pretty_print)
if self.reverseRate is not None:
self.reverseRate.export(outfile, level, namespace_, name_='reverseRate', pretty_print=pretty_print)
if self.steadyState is not None:
self.steadyState.export(outfile, level, namespace_, name_='steadyState', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='GateHHRatesInf'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.instances is not None and 'instances' not in already_processed:
already_processed.add('instances')
showIndent(outfile, level)
outfile.write('instances=%d,\n' % (self.instances,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
super(GateHHRatesInf, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(GateHHRatesInf, self).exportLiteralChildren(outfile, level, name_)
if self.notes is not None:
showIndent(outfile, level)
outfile.write('notes=%s,\n' % quote_python(self.notes).encode(ExternalEncoding))
if self.q10Settings is not None:
showIndent(outfile, level)
outfile.write('q10Settings=model_.Q10Settings(\n')
self.q10Settings.exportLiteral(outfile, level, name_='q10Settings')
showIndent(outfile, level)
outfile.write('),\n')
if self.forwardRate is not None:
showIndent(outfile, level)
outfile.write('forwardRate=model_.HHRate(\n')
self.forwardRate.exportLiteral(outfile, level, name_='forwardRate')
showIndent(outfile, level)
outfile.write('),\n')
if self.reverseRate is not None:
showIndent(outfile, level)
outfile.write('reverseRate=model_.HHRate(\n')
self.reverseRate.exportLiteral(outfile, level, name_='reverseRate')
showIndent(outfile, level)
outfile.write('),\n')
if self.steadyState is not None:
showIndent(outfile, level)
outfile.write('steadyState=model_.HHVariable(\n')
self.steadyState.exportLiteral(outfile, level, name_='steadyState')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('instances', node)
if value is not None and 'instances' not in already_processed:
already_processed.add('instances')
try:
self.instances = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_gateTypes(self.type_) # validate type gateTypes
super(GateHHRatesInf, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'notes':
notes_ = child_.text
notes_ = self.gds_validate_string(notes_, node, 'notes')
self.notes = notes_
self.validate_Notes(self.notes) # validate type Notes
elif nodeName_ == 'q10Settings':
obj_ = Q10Settings.factory()
obj_.build(child_)
self.set_q10Settings(obj_)
elif nodeName_ == 'forwardRate':
obj_ = HHRate.factory()
obj_.build(child_)
self.set_forwardRate(obj_)
elif nodeName_ == 'reverseRate':
obj_ = HHRate.factory()
obj_.build(child_)
self.set_reverseRate(obj_)
elif nodeName_ == 'steadyState':
obj_ = HHVariable.factory()
obj_.build(child_)
self.set_steadyState(obj_)
super(GateHHRatesInf, self).buildChildren(child_, node, nodeName_, True)
# end class GateHHRatesInf
class GateHHRatesTau(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, instances=1, type_=None, notes=None, q10Settings=None, forwardRate=None, reverseRate=None, timeCourse=None):
super(GateHHRatesTau, self).__init__(id, neuroLexId, )
self.instances = _cast(int, instances)
self.type_ = _cast(None, type_)
self.notes = notes
self.q10Settings = q10Settings
self.forwardRate = forwardRate
self.reverseRate = reverseRate
self.timeCourse = timeCourse
def factory(*args_, **kwargs_):
if GateHHRatesTau.subclass:
return GateHHRatesTau.subclass(*args_, **kwargs_)
else:
return GateHHRatesTau(*args_, **kwargs_)
factory = staticmethod(factory)
def get_notes(self): return self.notes
def set_notes(self, notes): self.notes = notes
def validate_Notes(self, value):
# Validate type Notes, a restriction on xs:string.
pass
def get_q10Settings(self): return self.q10Settings
def set_q10Settings(self, q10Settings): self.q10Settings = q10Settings
def get_forwardRate(self): return self.forwardRate
def set_forwardRate(self, forwardRate): self.forwardRate = forwardRate
def get_reverseRate(self): return self.reverseRate
def set_reverseRate(self, reverseRate): self.reverseRate = reverseRate
def get_timeCourse(self): return self.timeCourse
def set_timeCourse(self, timeCourse): self.timeCourse = timeCourse
def get_instances(self): return self.instances
def set_instances(self, instances): self.instances = instances
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_gateTypes(self, value):
# Validate type gateTypes, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.notes is not None or
self.q10Settings is not None or
self.forwardRate is not None or
self.reverseRate is not None or
self.timeCourse is not None or
super(GateHHRatesTau, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='GateHHRatesTau', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='GateHHRatesTau')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='GateHHRatesTau'):
super(GateHHRatesTau, self).exportAttributes(outfile, level, already_processed, namespace_, name_='GateHHRatesTau')
if self.instances is not None and 'instances' not in already_processed:
already_processed.add('instances')
outfile.write(' instances="%s"' % self.gds_format_integer(self.instances, input_name='instances'))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, outfile, level, namespace_='', name_='GateHHRatesTau', fromsubclass_=False, pretty_print=True):
super(GateHHRatesTau, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.notes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snotes>%s</%snotes>%s' % (namespace_, self.gds_format_string(quote_xml(self.notes).encode(ExternalEncoding), input_name='notes'), namespace_, eol_))
if self.q10Settings is not None:
self.q10Settings.export(outfile, level, namespace_, name_='q10Settings', pretty_print=pretty_print)
if self.forwardRate is not None:
self.forwardRate.export(outfile, level, namespace_, name_='forwardRate', pretty_print=pretty_print)
if self.reverseRate is not None:
self.reverseRate.export(outfile, level, namespace_, name_='reverseRate', pretty_print=pretty_print)
if self.timeCourse is not None:
self.timeCourse.export(outfile, level, namespace_, name_='timeCourse', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='GateHHRatesTau'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.instances is not None and 'instances' not in already_processed:
already_processed.add('instances')
showIndent(outfile, level)
outfile.write('instances=%d,\n' % (self.instances,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
super(GateHHRatesTau, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(GateHHRatesTau, self).exportLiteralChildren(outfile, level, name_)
if self.notes is not None:
showIndent(outfile, level)
outfile.write('notes=%s,\n' % quote_python(self.notes).encode(ExternalEncoding))
if self.q10Settings is not None:
showIndent(outfile, level)
outfile.write('q10Settings=model_.Q10Settings(\n')
self.q10Settings.exportLiteral(outfile, level, name_='q10Settings')
showIndent(outfile, level)
outfile.write('),\n')
if self.forwardRate is not None:
showIndent(outfile, level)
outfile.write('forwardRate=model_.HHRate(\n')
self.forwardRate.exportLiteral(outfile, level, name_='forwardRate')
showIndent(outfile, level)
outfile.write('),\n')
if self.reverseRate is not None:
showIndent(outfile, level)
outfile.write('reverseRate=model_.HHRate(\n')
self.reverseRate.exportLiteral(outfile, level, name_='reverseRate')
showIndent(outfile, level)
outfile.write('),\n')
if self.timeCourse is not None:
showIndent(outfile, level)
outfile.write('timeCourse=model_.HHTime(\n')
self.timeCourse.exportLiteral(outfile, level, name_='timeCourse')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('instances', node)
if value is not None and 'instances' not in already_processed:
already_processed.add('instances')
try:
self.instances = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_gateTypes(self.type_) # validate type gateTypes
super(GateHHRatesTau, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'notes':
notes_ = child_.text
notes_ = self.gds_validate_string(notes_, node, 'notes')
self.notes = notes_
self.validate_Notes(self.notes) # validate type Notes
elif nodeName_ == 'q10Settings':
obj_ = Q10Settings.factory()
obj_.build(child_)
self.set_q10Settings(obj_)
elif nodeName_ == 'forwardRate':
obj_ = HHRate.factory()
obj_.build(child_)
self.set_forwardRate(obj_)
elif nodeName_ == 'reverseRate':
obj_ = HHRate.factory()
obj_.build(child_)
self.set_reverseRate(obj_)
elif nodeName_ == 'timeCourse':
obj_ = HHTime.factory()
obj_.build(child_)
self.set_timeCourse(obj_)
super(GateHHRatesTau, self).buildChildren(child_, node, nodeName_, True)
# end class GateHHRatesTau
class GateHHTauInf(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, instances=1, type_=None, notes=None, q10Settings=None, timeCourse=None, steadyState=None):
super(GateHHTauInf, self).__init__(id, neuroLexId, )
self.instances = _cast(int, instances)
self.type_ = _cast(None, type_)
self.notes = notes
self.q10Settings = q10Settings
self.timeCourse = timeCourse
self.steadyState = steadyState
def factory(*args_, **kwargs_):
if GateHHTauInf.subclass:
return GateHHTauInf.subclass(*args_, **kwargs_)
else:
return GateHHTauInf(*args_, **kwargs_)
factory = staticmethod(factory)
def get_notes(self): return self.notes
def set_notes(self, notes): self.notes = notes
def validate_Notes(self, value):
# Validate type Notes, a restriction on xs:string.
pass
def get_q10Settings(self): return self.q10Settings
def set_q10Settings(self, q10Settings): self.q10Settings = q10Settings
def get_timeCourse(self): return self.timeCourse
def set_timeCourse(self, timeCourse): self.timeCourse = timeCourse
def get_steadyState(self): return self.steadyState
def set_steadyState(self, steadyState): self.steadyState = steadyState
def get_instances(self): return self.instances
def set_instances(self, instances): self.instances = instances
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_gateTypes(self, value):
# Validate type gateTypes, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.notes is not None or
self.q10Settings is not None or
self.timeCourse is not None or
self.steadyState is not None or
super(GateHHTauInf, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='GateHHTauInf', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='GateHHTauInf')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='GateHHTauInf'):
super(GateHHTauInf, self).exportAttributes(outfile, level, already_processed, namespace_, name_='GateHHTauInf')
if self.instances is not None and 'instances' not in already_processed:
already_processed.add('instances')
outfile.write(' instances="%s"' % self.gds_format_integer(self.instances, input_name='instances'))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, outfile, level, namespace_='', name_='GateHHTauInf', fromsubclass_=False, pretty_print=True):
super(GateHHTauInf, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.notes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snotes>%s</%snotes>%s' % (namespace_, self.gds_format_string(quote_xml(self.notes).encode(ExternalEncoding), input_name='notes'), namespace_, eol_))
if self.q10Settings is not None:
self.q10Settings.export(outfile, level, namespace_, name_='q10Settings', pretty_print=pretty_print)
if self.timeCourse is not None:
self.timeCourse.export(outfile, level, namespace_, name_='timeCourse', pretty_print=pretty_print)
if self.steadyState is not None:
self.steadyState.export(outfile, level, namespace_, name_='steadyState', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='GateHHTauInf'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.instances is not None and 'instances' not in already_processed:
already_processed.add('instances')
showIndent(outfile, level)
outfile.write('instances=%d,\n' % (self.instances,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
super(GateHHTauInf, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(GateHHTauInf, self).exportLiteralChildren(outfile, level, name_)
if self.notes is not None:
showIndent(outfile, level)
outfile.write('notes=%s,\n' % quote_python(self.notes).encode(ExternalEncoding))
if self.q10Settings is not None:
showIndent(outfile, level)
outfile.write('q10Settings=model_.Q10Settings(\n')
self.q10Settings.exportLiteral(outfile, level, name_='q10Settings')
showIndent(outfile, level)
outfile.write('),\n')
if self.timeCourse is not None:
showIndent(outfile, level)
outfile.write('timeCourse=model_.HHTime(\n')
self.timeCourse.exportLiteral(outfile, level, name_='timeCourse')
showIndent(outfile, level)
outfile.write('),\n')
if self.steadyState is not None:
showIndent(outfile, level)
outfile.write('steadyState=model_.HHVariable(\n')
self.steadyState.exportLiteral(outfile, level, name_='steadyState')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('instances', node)
if value is not None and 'instances' not in already_processed:
already_processed.add('instances')
try:
self.instances = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_gateTypes(self.type_) # validate type gateTypes
super(GateHHTauInf, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'notes':
notes_ = child_.text
notes_ = self.gds_validate_string(notes_, node, 'notes')
self.notes = notes_
self.validate_Notes(self.notes) # validate type Notes
elif nodeName_ == 'q10Settings':
obj_ = Q10Settings.factory()
obj_.build(child_)
self.set_q10Settings(obj_)
elif nodeName_ == 'timeCourse':
obj_ = HHTime.factory()
obj_.build(child_)
self.set_timeCourse(obj_)
elif nodeName_ == 'steadyState':
obj_ = HHVariable.factory()
obj_.build(child_)
self.set_steadyState(obj_)
super(GateHHTauInf, self).buildChildren(child_, node, nodeName_, True)
# end class GateHHTauInf
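# Gate variant defined by forward and reverse transition rates (HHRate children),
# with optional notes and q10Settings.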
class GateHHRates(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, instances=1, type_=None, notes=None, q10Settings=None, forwardRate=None, reverseRate=None):
super(GateHHRates, self).__init__(id, neuroLexId, )
self.instances = _cast(int, instances)
self.type_ = _cast(None, type_)
self.notes = notes
self.q10Settings = q10Settings
self.forwardRate = forwardRate
self.reverseRate = reverseRate
def factory(*args_, **kwargs_):
if GateHHRates.subclass:
return GateHHRates.subclass(*args_, **kwargs_)
else:
return GateHHRates(*args_, **kwargs_)
factory = staticmethod(factory)
def get_notes(self): return self.notes
def set_notes(self, notes): self.notes = notes
def validate_Notes(self, value):
# Validate type Notes, a restriction on xs:string.
pass
def get_q10Settings(self): return self.q10Settings
def set_q10Settings(self, q10Settings): self.q10Settings = q10Settings
def get_forwardRate(self): return self.forwardRate
def set_forwardRate(self, forwardRate): self.forwardRate = forwardRate
def get_reverseRate(self): return self.reverseRate
def set_reverseRate(self, reverseRate): self.reverseRate = reverseRate
def get_instances(self): return self.instances
def set_instances(self, instances): self.instances = instances
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_gateTypes(self, value):
# Validate type gateTypes, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.notes is not None or
self.q10Settings is not None or
self.forwardRate is not None or
self.reverseRate is not None or
super(GateHHRates, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='GateHHRates', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='GateHHRates')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='GateHHRates'):
super(GateHHRates, self).exportAttributes(outfile, level, already_processed, namespace_, name_='GateHHRates')
if self.instances is not None and 'instances' not in already_processed:
already_processed.add('instances')
outfile.write(' instances="%s"' % self.gds_format_integer(self.instances, input_name='instances'))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, outfile, level, namespace_='', name_='GateHHRates', fromsubclass_=False, pretty_print=True):
super(GateHHRates, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.notes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snotes>%s</%snotes>%s' % (namespace_, self.gds_format_string(quote_xml(self.notes).encode(ExternalEncoding), input_name='notes'), namespace_, eol_))
if self.q10Settings is not None:
self.q10Settings.export(outfile, level, namespace_, name_='q10Settings', pretty_print=pretty_print)
if self.forwardRate is not None:
self.forwardRate.export(outfile, level, namespace_, name_='forwardRate', pretty_print=pretty_print)
if self.reverseRate is not None:
self.reverseRate.export(outfile, level, namespace_, name_='reverseRate', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='GateHHRates'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.instances is not None and 'instances' not in already_processed:
already_processed.add('instances')
showIndent(outfile, level)
outfile.write('instances=%d,\n' % (self.instances,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
super(GateHHRates, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(GateHHRates, self).exportLiteralChildren(outfile, level, name_)
if self.notes is not None:
showIndent(outfile, level)
outfile.write('notes=%s,\n' % quote_python(self.notes).encode(ExternalEncoding))
if self.q10Settings is not None:
showIndent(outfile, level)
outfile.write('q10Settings=model_.Q10Settings(\n')
self.q10Settings.exportLiteral(outfile, level, name_='q10Settings')
showIndent(outfile, level)
outfile.write('),\n')
if self.forwardRate is not None:
showIndent(outfile, level)
outfile.write('forwardRate=model_.HHRate(\n')
self.forwardRate.exportLiteral(outfile, level, name_='forwardRate')
showIndent(outfile, level)
outfile.write('),\n')
if self.reverseRate is not None:
showIndent(outfile, level)
outfile.write('reverseRate=model_.HHRate(\n')
self.reverseRate.exportLiteral(outfile, level, name_='reverseRate')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('instances', node)
if value is not None and 'instances' not in already_processed:
already_processed.add('instances')
try:
self.instances = int(value)
            except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_gateTypes(self.type_) # validate type gateTypes
super(GateHHRates, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'notes':
notes_ = child_.text
notes_ = self.gds_validate_string(notes_, node, 'notes')
self.notes = notes_
self.validate_Notes(self.notes) # validate type Notes
elif nodeName_ == 'q10Settings':
obj_ = Q10Settings.factory()
obj_.build(child_)
self.set_q10Settings(obj_)
elif nodeName_ == 'forwardRate':
obj_ = HHRate.factory()
obj_.build(child_)
self.set_forwardRate(obj_)
elif nodeName_ == 'reverseRate':
obj_ = HHRate.factory()
obj_.build(child_)
self.set_reverseRate(obj_)
super(GateHHRates, self).buildChildren(child_, node, nodeName_, True)
# end class GateHHRates
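# Gate variant whose kinetics children (forward/reverse rates, time course, steady state)
# may appear in any combination.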
class GateHHUndetermined(Base):
subclass = None
superclass = Base
def __init__(self, id=None, neuroLexId=None, instances=1, type_=None, notes=None, q10Settings=None, forwardRate=None, reverseRate=None, timeCourse=None, steadyState=None):
super(GateHHUndetermined, self).__init__(id, neuroLexId, )
self.instances = _cast(int, instances)
self.type_ = _cast(None, type_)
self.notes = notes
self.q10Settings = q10Settings
self.forwardRate = forwardRate
self.reverseRate = reverseRate
self.timeCourse = timeCourse
self.steadyState = steadyState
def factory(*args_, **kwargs_):
if GateHHUndetermined.subclass:
return GateHHUndetermined.subclass(*args_, **kwargs_)
else:
return GateHHUndetermined(*args_, **kwargs_)
factory = staticmethod(factory)
def get_notes(self): return self.notes
def set_notes(self, notes): self.notes = notes
def validate_Notes(self, value):
# Validate type Notes, a restriction on xs:string.
pass
def get_q10Settings(self): return self.q10Settings
def set_q10Settings(self, q10Settings): self.q10Settings = q10Settings
def get_forwardRate(self): return self.forwardRate
def set_forwardRate(self, forwardRate): self.forwardRate = forwardRate
def get_reverseRate(self): return self.reverseRate
def set_reverseRate(self, reverseRate): self.reverseRate = reverseRate
def get_timeCourse(self): return self.timeCourse
def set_timeCourse(self, timeCourse): self.timeCourse = timeCourse
def get_steadyState(self): return self.steadyState
def set_steadyState(self, steadyState): self.steadyState = steadyState
def get_instances(self): return self.instances
def set_instances(self, instances): self.instances = instances
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_gateTypes(self, value):
# Validate type gateTypes, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.notes is not None or
self.q10Settings is not None or
self.forwardRate is not None or
self.reverseRate is not None or
self.timeCourse is not None or
self.steadyState is not None or
super(GateHHUndetermined, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='GateHHUndetermined', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='GateHHUndetermined')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='GateHHUndetermined'):
super(GateHHUndetermined, self).exportAttributes(outfile, level, already_processed, namespace_, name_='GateHHUndetermined')
if self.instances is not None and 'instances' not in already_processed:
already_processed.add('instances')
outfile.write(' instances="%s"' % self.gds_format_integer(self.instances, input_name='instances'))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, outfile, level, namespace_='', name_='GateHHUndetermined', fromsubclass_=False, pretty_print=True):
super(GateHHUndetermined, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.notes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snotes>%s</%snotes>%s' % (namespace_, self.gds_format_string(quote_xml(self.notes).encode(ExternalEncoding), input_name='notes'), namespace_, eol_))
if self.q10Settings is not None:
self.q10Settings.export(outfile, level, namespace_, name_='q10Settings', pretty_print=pretty_print)
if self.forwardRate is not None:
self.forwardRate.export(outfile, level, namespace_, name_='forwardRate', pretty_print=pretty_print)
if self.reverseRate is not None:
self.reverseRate.export(outfile, level, namespace_, name_='reverseRate', pretty_print=pretty_print)
if self.timeCourse is not None:
self.timeCourse.export(outfile, level, namespace_, name_='timeCourse', pretty_print=pretty_print)
if self.steadyState is not None:
self.steadyState.export(outfile, level, namespace_, name_='steadyState', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='GateHHUndetermined'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.instances is not None and 'instances' not in already_processed:
already_processed.add('instances')
showIndent(outfile, level)
outfile.write('instances=%d,\n' % (self.instances,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
super(GateHHUndetermined, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(GateHHUndetermined, self).exportLiteralChildren(outfile, level, name_)
if self.notes is not None:
showIndent(outfile, level)
outfile.write('notes=%s,\n' % quote_python(self.notes).encode(ExternalEncoding))
if self.q10Settings is not None:
showIndent(outfile, level)
outfile.write('q10Settings=model_.Q10Settings(\n')
self.q10Settings.exportLiteral(outfile, level, name_='q10Settings')
showIndent(outfile, level)
outfile.write('),\n')
if self.forwardRate is not None:
showIndent(outfile, level)
outfile.write('forwardRate=model_.HHRate(\n')
self.forwardRate.exportLiteral(outfile, level, name_='forwardRate')
showIndent(outfile, level)
outfile.write('),\n')
if self.reverseRate is not None:
showIndent(outfile, level)
outfile.write('reverseRate=model_.HHRate(\n')
self.reverseRate.exportLiteral(outfile, level, name_='reverseRate')
showIndent(outfile, level)
outfile.write('),\n')
if self.timeCourse is not None:
showIndent(outfile, level)
outfile.write('timeCourse=model_.HHTime(\n')
self.timeCourse.exportLiteral(outfile, level, name_='timeCourse')
showIndent(outfile, level)
outfile.write('),\n')
if self.steadyState is not None:
showIndent(outfile, level)
outfile.write('steadyState=model_.HHVariable(\n')
self.steadyState.exportLiteral(outfile, level, name_='steadyState')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('instances', node)
if value is not None and 'instances' not in already_processed:
already_processed.add('instances')
try:
self.instances = int(value)
            except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_gateTypes(self.type_) # validate type gateTypes
super(GateHHUndetermined, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'notes':
notes_ = child_.text
notes_ = self.gds_validate_string(notes_, node, 'notes')
self.notes = notes_
self.validate_Notes(self.notes) # validate type Notes
elif nodeName_ == 'q10Settings':
obj_ = Q10Settings.factory()
obj_.build(child_)
self.set_q10Settings(obj_)
elif nodeName_ == 'forwardRate':
obj_ = HHRate.factory()
obj_.build(child_)
self.set_forwardRate(obj_)
elif nodeName_ == 'reverseRate':
obj_ = HHRate.factory()
obj_.build(child_)
self.set_reverseRate(obj_)
elif nodeName_ == 'timeCourse':
obj_ = HHTime.factory()
obj_.build(child_)
self.set_timeCourse(obj_)
elif nodeName_ == 'steadyState':
obj_ = HHVariable.factory()
obj_.build(child_)
self.set_steadyState(obj_)
super(GateHHUndetermined, self).buildChildren(child_, node, nodeName_, True)
# end class GateHHUndetermined
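# Ion channel element: conductance/type/species attributes plus lists of the gate variants above.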
class IonChannel(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, conductance=None, type_=None, species=None, gate=None, gateHHrates=None, gateHHratesTau=None, gateHHtauInf=None, gateHHratesInf=None):
super(IonChannel, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.conductance = _cast(None, conductance)
self.type_ = _cast(None, type_)
self.species = _cast(None, species)
if gate is None:
self.gate = []
else:
self.gate = gate
if gateHHrates is None:
self.gateHHrates = []
else:
self.gateHHrates = gateHHrates
if gateHHratesTau is None:
self.gateHHratesTau = []
else:
self.gateHHratesTau = gateHHratesTau
if gateHHtauInf is None:
self.gateHHtauInf = []
else:
self.gateHHtauInf = gateHHtauInf
if gateHHratesInf is None:
self.gateHHratesInf = []
else:
self.gateHHratesInf = gateHHratesInf
def factory(*args_, **kwargs_):
if IonChannel.subclass:
return IonChannel.subclass(*args_, **kwargs_)
else:
return IonChannel(*args_, **kwargs_)
factory = staticmethod(factory)
def get_gate(self): return self.gate
def set_gate(self, gate): self.gate = gate
def add_gate(self, value): self.gate.append(value)
def insert_gate(self, index, value): self.gate[index] = value
def get_gateHHrates(self): return self.gateHHrates
def set_gateHHrates(self, gateHHrates): self.gateHHrates = gateHHrates
def add_gateHHrates(self, value): self.gateHHrates.append(value)
def insert_gateHHrates(self, index, value): self.gateHHrates[index] = value
def get_gateHHratesTau(self): return self.gateHHratesTau
def set_gateHHratesTau(self, gateHHratesTau): self.gateHHratesTau = gateHHratesTau
def add_gateHHratesTau(self, value): self.gateHHratesTau.append(value)
def insert_gateHHratesTau(self, index, value): self.gateHHratesTau[index] = value
def get_gateHHtauInf(self): return self.gateHHtauInf
def set_gateHHtauInf(self, gateHHtauInf): self.gateHHtauInf = gateHHtauInf
def add_gateHHtauInf(self, value): self.gateHHtauInf.append(value)
def insert_gateHHtauInf(self, index, value): self.gateHHtauInf[index] = value
def get_gateHHratesInf(self): return self.gateHHratesInf
def set_gateHHratesInf(self, gateHHratesInf): self.gateHHratesInf = gateHHratesInf
def add_gateHHratesInf(self, value): self.gateHHratesInf.append(value)
def insert_gateHHratesInf(self, index, value): self.gateHHratesInf[index] = value
def get_conductance(self): return self.conductance
def set_conductance(self, conductance): self.conductance = conductance
def validate_Nml2Quantity_conductance(self, value):
# Validate type Nml2Quantity_conductance, a restriction on xs:string.
pass
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_channelTypes(self, value):
# Validate type channelTypes, a restriction on xs:string.
pass
def get_species(self): return self.species
def set_species(self, species): self.species = species
def validate_NmlId(self, value):
# Validate type NmlId, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.gate or
self.gateHHrates or
self.gateHHratesTau or
self.gateHHtauInf or
self.gateHHratesInf or
super(IonChannel, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IonChannel', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IonChannel')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IonChannel'):
super(IonChannel, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IonChannel')
if self.conductance is not None and 'conductance' not in already_processed:
already_processed.add('conductance')
outfile.write(' conductance=%s' % (quote_attrib(self.conductance), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
if self.species is not None and 'species' not in already_processed:
already_processed.add('species')
outfile.write(' species=%s' % (quote_attrib(self.species), ))
def exportChildren(self, outfile, level, namespace_='', name_='IonChannel', fromsubclass_=False, pretty_print=True):
super(IonChannel, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for gate_ in self.gate:
gate_.export(outfile, level, namespace_, name_='gate', pretty_print=pretty_print)
for gateHHrates_ in self.gateHHrates:
gateHHrates_.export(outfile, level, namespace_, name_='gateHHrates', pretty_print=pretty_print)
for gateHHratesTau_ in self.gateHHratesTau:
gateHHratesTau_.export(outfile, level, namespace_, name_='gateHHratesTau', pretty_print=pretty_print)
for gateHHtauInf_ in self.gateHHtauInf:
gateHHtauInf_.export(outfile, level, namespace_, name_='gateHHtauInf', pretty_print=pretty_print)
for gateHHratesInf_ in self.gateHHratesInf:
gateHHratesInf_.export(outfile, level, namespace_, name_='gateHHratesInf', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IonChannel'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.conductance is not None and 'conductance' not in already_processed:
already_processed.add('conductance')
showIndent(outfile, level)
outfile.write('conductance="%s",\n' % (self.conductance,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.species is not None and 'species' not in already_processed:
already_processed.add('species')
showIndent(outfile, level)
outfile.write('species="%s",\n' % (self.species,))
super(IonChannel, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IonChannel, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('gate=[\n')
level += 1
for gate_ in self.gate:
showIndent(outfile, level)
outfile.write('model_.GateHHUndetermined(\n')
gate_.exportLiteral(outfile, level, name_='GateHHUndetermined')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('gateHHrates=[\n')
level += 1
for gateHHrates_ in self.gateHHrates:
showIndent(outfile, level)
outfile.write('model_.GateHHRates(\n')
gateHHrates_.exportLiteral(outfile, level, name_='GateHHRates')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('gateHHratesTau=[\n')
level += 1
for gateHHratesTau_ in self.gateHHratesTau:
showIndent(outfile, level)
outfile.write('model_.GateHHRatesTau(\n')
gateHHratesTau_.exportLiteral(outfile, level, name_='GateHHRatesTau')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('gateHHtauInf=[\n')
level += 1
for gateHHtauInf_ in self.gateHHtauInf:
showIndent(outfile, level)
outfile.write('model_.GateHHTauInf(\n')
gateHHtauInf_.exportLiteral(outfile, level, name_='GateHHTauInf')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('gateHHratesInf=[\n')
level += 1
for gateHHratesInf_ in self.gateHHratesInf:
showIndent(outfile, level)
outfile.write('model_.GateHHRatesInf(\n')
gateHHratesInf_.exportLiteral(outfile, level, name_='GateHHRatesInf')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('conductance', node)
if value is not None and 'conductance' not in already_processed:
already_processed.add('conductance')
self.conductance = value
self.validate_Nml2Quantity_conductance(self.conductance) # validate type Nml2Quantity_conductance
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.validate_channelTypes(self.type_) # validate type channelTypes
value = find_attr_value_('species', node)
if value is not None and 'species' not in already_processed:
already_processed.add('species')
self.species = value
self.validate_NmlId(self.species) # validate type NmlId
super(IonChannel, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'gate':
obj_ = GateHHUndetermined.factory()
obj_.build(child_)
self.gate.append(obj_)
elif nodeName_ == 'gateHHrates':
obj_ = GateHHRates.factory()
obj_.build(child_)
self.gateHHrates.append(obj_)
elif nodeName_ == 'gateHHratesTau':
obj_ = GateHHRatesTau.factory()
obj_.build(child_)
self.gateHHratesTau.append(obj_)
elif nodeName_ == 'gateHHtauInf':
obj_ = GateHHTauInf.factory()
obj_.build(child_)
self.gateHHtauInf.append(obj_)
elif nodeName_ == 'gateHHratesInf':
obj_ = GateHHRatesInf.factory()
obj_.build(child_)
self.gateHHratesInf.append(obj_)
super(IonChannel, self).buildChildren(child_, node, nodeName_, True)
# end class IonChannel
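# Root NeuroML document: container for includes, morphologies, ion channels, cells,
# synapses, input generators, networks and ComponentType definitions.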
class NeuroMLDocument(Standalone):
subclass = None
superclass = Standalone
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, include=None, extracellularProperties=None, intracellularProperties=None, morphology=None, ionChannel=None, decayingPoolConcentrationModel=None, expOneSynapse=None, expTwoSynapse=None, blockingPlasticSynapse=None, biophysicalProperties=None, cell=None, baseCell=None, iafTauCell=None, iafTauRefCell=None, iafCell=None, iafRefCell=None, izhikevichCell=None, adExIaFCell=None, pulseGenerator=None, sineGenerator=None, rampGenerator=None, voltageClamp=None, spikeArray=None, spikeGenerator=None, spikeGeneratorRandom=None, spikeGeneratorPoisson=None, IF_curr_alpha=None, IF_curr_exp=None, IF_cond_alpha=None, IF_cond_exp=None, EIF_cond_exp_isfa_ista=None, EIF_cond_alpha_isfa_ista=None, HH_cond_exp=None, expCondSynapse=None, alphaCondSynapse=None, expCurrSynapse=None, alphaCurrSynapse=None, SpikeSourcePoisson=None, network=None, ComponentType=None):
super(NeuroMLDocument, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
if include is None:
self.include = []
else:
self.include = include
if extracellularProperties is None:
self.extracellularProperties = []
else:
self.extracellularProperties = extracellularProperties
if intracellularProperties is None:
self.intracellularProperties = []
else:
self.intracellularProperties = intracellularProperties
if morphology is None:
self.morphology = []
else:
self.morphology = morphology
if ionChannel is None:
self.ionChannel = []
else:
self.ionChannel = ionChannel
if decayingPoolConcentrationModel is None:
self.decayingPoolConcentrationModel = []
else:
self.decayingPoolConcentrationModel = decayingPoolConcentrationModel
if expOneSynapse is None:
self.expOneSynapse = []
else:
self.expOneSynapse = expOneSynapse
if expTwoSynapse is None:
self.expTwoSynapse = []
else:
self.expTwoSynapse = expTwoSynapse
if blockingPlasticSynapse is None:
self.blockingPlasticSynapse = []
else:
self.blockingPlasticSynapse = blockingPlasticSynapse
if biophysicalProperties is None:
self.biophysicalProperties = []
else:
self.biophysicalProperties = biophysicalProperties
if cell is None:
self.cell = []
else:
self.cell = cell
if baseCell is None:
self.baseCell = []
else:
self.baseCell = baseCell
if iafTauCell is None:
self.iafTauCell = []
else:
self.iafTauCell = iafTauCell
if iafTauRefCell is None:
self.iafTauRefCell = []
else:
self.iafTauRefCell = iafTauRefCell
if iafCell is None:
self.iafCell = []
else:
self.iafCell = iafCell
if iafRefCell is None:
self.iafRefCell = []
else:
self.iafRefCell = iafRefCell
if izhikevichCell is None:
self.izhikevichCell = []
else:
self.izhikevichCell = izhikevichCell
if adExIaFCell is None:
self.adExIaFCell = []
else:
self.adExIaFCell = adExIaFCell
if pulseGenerator is None:
self.pulseGenerator = []
else:
self.pulseGenerator = pulseGenerator
if sineGenerator is None:
self.sineGenerator = []
else:
self.sineGenerator = sineGenerator
if rampGenerator is None:
self.rampGenerator = []
else:
self.rampGenerator = rampGenerator
if voltageClamp is None:
self.voltageClamp = []
else:
self.voltageClamp = voltageClamp
if spikeArray is None:
self.spikeArray = []
else:
self.spikeArray = spikeArray
if spikeGenerator is None:
self.spikeGenerator = []
else:
self.spikeGenerator = spikeGenerator
if spikeGeneratorRandom is None:
self.spikeGeneratorRandom = []
else:
self.spikeGeneratorRandom = spikeGeneratorRandom
if spikeGeneratorPoisson is None:
self.spikeGeneratorPoisson = []
else:
self.spikeGeneratorPoisson = spikeGeneratorPoisson
if IF_curr_alpha is None:
self.IF_curr_alpha = []
else:
self.IF_curr_alpha = IF_curr_alpha
if IF_curr_exp is None:
self.IF_curr_exp = []
else:
self.IF_curr_exp = IF_curr_exp
if IF_cond_alpha is None:
self.IF_cond_alpha = []
else:
self.IF_cond_alpha = IF_cond_alpha
if IF_cond_exp is None:
self.IF_cond_exp = []
else:
self.IF_cond_exp = IF_cond_exp
if EIF_cond_exp_isfa_ista is None:
self.EIF_cond_exp_isfa_ista = []
else:
self.EIF_cond_exp_isfa_ista = EIF_cond_exp_isfa_ista
if EIF_cond_alpha_isfa_ista is None:
self.EIF_cond_alpha_isfa_ista = []
else:
self.EIF_cond_alpha_isfa_ista = EIF_cond_alpha_isfa_ista
if HH_cond_exp is None:
self.HH_cond_exp = []
else:
self.HH_cond_exp = HH_cond_exp
if expCondSynapse is None:
self.expCondSynapse = []
else:
self.expCondSynapse = expCondSynapse
if alphaCondSynapse is None:
self.alphaCondSynapse = []
else:
self.alphaCondSynapse = alphaCondSynapse
if expCurrSynapse is None:
self.expCurrSynapse = []
else:
self.expCurrSynapse = expCurrSynapse
if alphaCurrSynapse is None:
self.alphaCurrSynapse = []
else:
self.alphaCurrSynapse = alphaCurrSynapse
if SpikeSourcePoisson is None:
self.SpikeSourcePoisson = []
else:
self.SpikeSourcePoisson = SpikeSourcePoisson
if network is None:
self.network = []
else:
self.network = network
if ComponentType is None:
self.ComponentType = []
else:
self.ComponentType = ComponentType
def factory(*args_, **kwargs_):
if NeuroMLDocument.subclass:
return NeuroMLDocument.subclass(*args_, **kwargs_)
else:
return NeuroMLDocument(*args_, **kwargs_)
factory = staticmethod(factory)
def get_include(self): return self.include
def set_include(self, include): self.include = include
def add_include(self, value): self.include.append(value)
def insert_include(self, index, value): self.include[index] = value
def get_extracellularProperties(self): return self.extracellularProperties
def set_extracellularProperties(self, extracellularProperties): self.extracellularProperties = extracellularProperties
def add_extracellularProperties(self, value): self.extracellularProperties.append(value)
def insert_extracellularProperties(self, index, value): self.extracellularProperties[index] = value
def get_intracellularProperties(self): return self.intracellularProperties
def set_intracellularProperties(self, intracellularProperties): self.intracellularProperties = intracellularProperties
def add_intracellularProperties(self, value): self.intracellularProperties.append(value)
def insert_intracellularProperties(self, index, value): self.intracellularProperties[index] = value
def get_morphology(self): return self.morphology
def set_morphology(self, morphology): self.morphology = morphology
def add_morphology(self, value): self.morphology.append(value)
def insert_morphology(self, index, value): self.morphology[index] = value
def get_ionChannel(self): return self.ionChannel
def set_ionChannel(self, ionChannel): self.ionChannel = ionChannel
def add_ionChannel(self, value): self.ionChannel.append(value)
def insert_ionChannel(self, index, value): self.ionChannel[index] = value
def get_decayingPoolConcentrationModel(self): return self.decayingPoolConcentrationModel
def set_decayingPoolConcentrationModel(self, decayingPoolConcentrationModel): self.decayingPoolConcentrationModel = decayingPoolConcentrationModel
def add_decayingPoolConcentrationModel(self, value): self.decayingPoolConcentrationModel.append(value)
def insert_decayingPoolConcentrationModel(self, index, value): self.decayingPoolConcentrationModel[index] = value
def get_expOneSynapse(self): return self.expOneSynapse
def set_expOneSynapse(self, expOneSynapse): self.expOneSynapse = expOneSynapse
def add_expOneSynapse(self, value): self.expOneSynapse.append(value)
def insert_expOneSynapse(self, index, value): self.expOneSynapse[index] = value
def get_expTwoSynapse(self): return self.expTwoSynapse
def set_expTwoSynapse(self, expTwoSynapse): self.expTwoSynapse = expTwoSynapse
def add_expTwoSynapse(self, value): self.expTwoSynapse.append(value)
def insert_expTwoSynapse(self, index, value): self.expTwoSynapse[index] = value
def get_blockingPlasticSynapse(self): return self.blockingPlasticSynapse
def set_blockingPlasticSynapse(self, blockingPlasticSynapse): self.blockingPlasticSynapse = blockingPlasticSynapse
def add_blockingPlasticSynapse(self, value): self.blockingPlasticSynapse.append(value)
def insert_blockingPlasticSynapse(self, index, value): self.blockingPlasticSynapse[index] = value
def get_biophysicalProperties(self): return self.biophysicalProperties
def set_biophysicalProperties(self, biophysicalProperties): self.biophysicalProperties = biophysicalProperties
def add_biophysicalProperties(self, value): self.biophysicalProperties.append(value)
def insert_biophysicalProperties(self, index, value): self.biophysicalProperties[index] = value
def get_cell(self): return self.cell
def set_cell(self, cell): self.cell = cell
def add_cell(self, value): self.cell.append(value)
def insert_cell(self, index, value): self.cell[index] = value
def get_baseCell(self): return self.baseCell
def set_baseCell(self, baseCell): self.baseCell = baseCell
def add_baseCell(self, value): self.baseCell.append(value)
def insert_baseCell(self, index, value): self.baseCell[index] = value
def get_iafTauCell(self): return self.iafTauCell
def set_iafTauCell(self, iafTauCell): self.iafTauCell = iafTauCell
def add_iafTauCell(self, value): self.iafTauCell.append(value)
def insert_iafTauCell(self, index, value): self.iafTauCell[index] = value
def get_iafTauRefCell(self): return self.iafTauRefCell
def set_iafTauRefCell(self, iafTauRefCell): self.iafTauRefCell = iafTauRefCell
def add_iafTauRefCell(self, value): self.iafTauRefCell.append(value)
def insert_iafTauRefCell(self, index, value): self.iafTauRefCell[index] = value
def get_iafCell(self): return self.iafCell
def set_iafCell(self, iafCell): self.iafCell = iafCell
def add_iafCell(self, value): self.iafCell.append(value)
def insert_iafCell(self, index, value): self.iafCell[index] = value
def get_iafRefCell(self): return self.iafRefCell
def set_iafRefCell(self, iafRefCell): self.iafRefCell = iafRefCell
def add_iafRefCell(self, value): self.iafRefCell.append(value)
def insert_iafRefCell(self, index, value): self.iafRefCell[index] = value
def get_izhikevichCell(self): return self.izhikevichCell
def set_izhikevichCell(self, izhikevichCell): self.izhikevichCell = izhikevichCell
def add_izhikevichCell(self, value): self.izhikevichCell.append(value)
def insert_izhikevichCell(self, index, value): self.izhikevichCell[index] = value
def get_adExIaFCell(self): return self.adExIaFCell
def set_adExIaFCell(self, adExIaFCell): self.adExIaFCell = adExIaFCell
def add_adExIaFCell(self, value): self.adExIaFCell.append(value)
def insert_adExIaFCell(self, index, value): self.adExIaFCell[index] = value
def get_pulseGenerator(self): return self.pulseGenerator
def set_pulseGenerator(self, pulseGenerator): self.pulseGenerator = pulseGenerator
def add_pulseGenerator(self, value): self.pulseGenerator.append(value)
def insert_pulseGenerator(self, index, value): self.pulseGenerator[index] = value
def get_sineGenerator(self): return self.sineGenerator
def set_sineGenerator(self, sineGenerator): self.sineGenerator = sineGenerator
def add_sineGenerator(self, value): self.sineGenerator.append(value)
def insert_sineGenerator(self, index, value): self.sineGenerator[index] = value
def get_rampGenerator(self): return self.rampGenerator
def set_rampGenerator(self, rampGenerator): self.rampGenerator = rampGenerator
def add_rampGenerator(self, value): self.rampGenerator.append(value)
def insert_rampGenerator(self, index, value): self.rampGenerator[index] = value
def get_voltageClamp(self): return self.voltageClamp
def set_voltageClamp(self, voltageClamp): self.voltageClamp = voltageClamp
def add_voltageClamp(self, value): self.voltageClamp.append(value)
def insert_voltageClamp(self, index, value): self.voltageClamp[index] = value
def get_spikeArray(self): return self.spikeArray
def set_spikeArray(self, spikeArray): self.spikeArray = spikeArray
def add_spikeArray(self, value): self.spikeArray.append(value)
def insert_spikeArray(self, index, value): self.spikeArray[index] = value
def get_spikeGenerator(self): return self.spikeGenerator
def set_spikeGenerator(self, spikeGenerator): self.spikeGenerator = spikeGenerator
def add_spikeGenerator(self, value): self.spikeGenerator.append(value)
def insert_spikeGenerator(self, index, value): self.spikeGenerator[index] = value
def get_spikeGeneratorRandom(self): return self.spikeGeneratorRandom
def set_spikeGeneratorRandom(self, spikeGeneratorRandom): self.spikeGeneratorRandom = spikeGeneratorRandom
def add_spikeGeneratorRandom(self, value): self.spikeGeneratorRandom.append(value)
def insert_spikeGeneratorRandom(self, index, value): self.spikeGeneratorRandom[index] = value
def get_spikeGeneratorPoisson(self): return self.spikeGeneratorPoisson
def set_spikeGeneratorPoisson(self, spikeGeneratorPoisson): self.spikeGeneratorPoisson = spikeGeneratorPoisson
def add_spikeGeneratorPoisson(self, value): self.spikeGeneratorPoisson.append(value)
def insert_spikeGeneratorPoisson(self, index, value): self.spikeGeneratorPoisson[index] = value
def get_IF_curr_alpha(self): return self.IF_curr_alpha
def set_IF_curr_alpha(self, IF_curr_alpha): self.IF_curr_alpha = IF_curr_alpha
def add_IF_curr_alpha(self, value): self.IF_curr_alpha.append(value)
def insert_IF_curr_alpha(self, index, value): self.IF_curr_alpha[index] = value
def get_IF_curr_exp(self): return self.IF_curr_exp
def set_IF_curr_exp(self, IF_curr_exp): self.IF_curr_exp = IF_curr_exp
def add_IF_curr_exp(self, value): self.IF_curr_exp.append(value)
def insert_IF_curr_exp(self, index, value): self.IF_curr_exp[index] = value
def get_IF_cond_alpha(self): return self.IF_cond_alpha
def set_IF_cond_alpha(self, IF_cond_alpha): self.IF_cond_alpha = IF_cond_alpha
def add_IF_cond_alpha(self, value): self.IF_cond_alpha.append(value)
def insert_IF_cond_alpha(self, index, value): self.IF_cond_alpha[index] = value
def get_IF_cond_exp(self): return self.IF_cond_exp
def set_IF_cond_exp(self, IF_cond_exp): self.IF_cond_exp = IF_cond_exp
def add_IF_cond_exp(self, value): self.IF_cond_exp.append(value)
def insert_IF_cond_exp(self, index, value): self.IF_cond_exp[index] = value
def get_EIF_cond_exp_isfa_ista(self): return self.EIF_cond_exp_isfa_ista
def set_EIF_cond_exp_isfa_ista(self, EIF_cond_exp_isfa_ista): self.EIF_cond_exp_isfa_ista = EIF_cond_exp_isfa_ista
def add_EIF_cond_exp_isfa_ista(self, value): self.EIF_cond_exp_isfa_ista.append(value)
def insert_EIF_cond_exp_isfa_ista(self, index, value): self.EIF_cond_exp_isfa_ista[index] = value
def get_EIF_cond_alpha_isfa_ista(self): return self.EIF_cond_alpha_isfa_ista
def set_EIF_cond_alpha_isfa_ista(self, EIF_cond_alpha_isfa_ista): self.EIF_cond_alpha_isfa_ista = EIF_cond_alpha_isfa_ista
def add_EIF_cond_alpha_isfa_ista(self, value): self.EIF_cond_alpha_isfa_ista.append(value)
def insert_EIF_cond_alpha_isfa_ista(self, index, value): self.EIF_cond_alpha_isfa_ista[index] = value
def get_HH_cond_exp(self): return self.HH_cond_exp
def set_HH_cond_exp(self, HH_cond_exp): self.HH_cond_exp = HH_cond_exp
def add_HH_cond_exp(self, value): self.HH_cond_exp.append(value)
def insert_HH_cond_exp(self, index, value): self.HH_cond_exp[index] = value
def get_expCondSynapse(self): return self.expCondSynapse
def set_expCondSynapse(self, expCondSynapse): self.expCondSynapse = expCondSynapse
def add_expCondSynapse(self, value): self.expCondSynapse.append(value)
def insert_expCondSynapse(self, index, value): self.expCondSynapse[index] = value
def get_alphaCondSynapse(self): return self.alphaCondSynapse
def set_alphaCondSynapse(self, alphaCondSynapse): self.alphaCondSynapse = alphaCondSynapse
def add_alphaCondSynapse(self, value): self.alphaCondSynapse.append(value)
def insert_alphaCondSynapse(self, index, value): self.alphaCondSynapse[index] = value
def get_expCurrSynapse(self): return self.expCurrSynapse
def set_expCurrSynapse(self, expCurrSynapse): self.expCurrSynapse = expCurrSynapse
def add_expCurrSynapse(self, value): self.expCurrSynapse.append(value)
def insert_expCurrSynapse(self, index, value): self.expCurrSynapse[index] = value
def get_alphaCurrSynapse(self): return self.alphaCurrSynapse
def set_alphaCurrSynapse(self, alphaCurrSynapse): self.alphaCurrSynapse = alphaCurrSynapse
def add_alphaCurrSynapse(self, value): self.alphaCurrSynapse.append(value)
def insert_alphaCurrSynapse(self, index, value): self.alphaCurrSynapse[index] = value
def get_SpikeSourcePoisson(self): return self.SpikeSourcePoisson
def set_SpikeSourcePoisson(self, SpikeSourcePoisson): self.SpikeSourcePoisson = SpikeSourcePoisson
def add_SpikeSourcePoisson(self, value): self.SpikeSourcePoisson.append(value)
def insert_SpikeSourcePoisson(self, index, value): self.SpikeSourcePoisson[index] = value
def get_network(self): return self.network
def set_network(self, network): self.network = network
def add_network(self, value): self.network.append(value)
def insert_network(self, index, value): self.network[index] = value
def get_ComponentType(self): return self.ComponentType
def set_ComponentType(self, ComponentType): self.ComponentType = ComponentType
def add_ComponentType(self, value): self.ComponentType.append(value)
def insert_ComponentType(self, index, value): self.ComponentType[index] = value
def hasContent_(self):
if (
self.include or
self.extracellularProperties or
self.intracellularProperties or
self.morphology or
self.ionChannel or
self.decayingPoolConcentrationModel or
self.expOneSynapse or
self.expTwoSynapse or
self.blockingPlasticSynapse or
self.biophysicalProperties or
self.cell or
self.baseCell or
self.iafTauCell or
self.iafTauRefCell or
self.iafCell or
self.iafRefCell or
self.izhikevichCell or
self.adExIaFCell or
self.pulseGenerator or
self.sineGenerator or
self.rampGenerator or
self.voltageClamp or
self.spikeArray or
self.spikeGenerator or
self.spikeGeneratorRandom or
self.spikeGeneratorPoisson or
self.IF_curr_alpha or
self.IF_curr_exp or
self.IF_cond_alpha or
self.IF_cond_exp or
self.EIF_cond_exp_isfa_ista or
self.EIF_cond_alpha_isfa_ista or
self.HH_cond_exp or
self.expCondSynapse or
self.alphaCondSynapse or
self.expCurrSynapse or
self.alphaCurrSynapse or
self.SpikeSourcePoisson or
self.network or
self.ComponentType or
super(NeuroMLDocument, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='NeuroMLDocument', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='NeuroMLDocument')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='NeuroMLDocument'):
super(NeuroMLDocument, self).exportAttributes(outfile, level, already_processed, namespace_, name_='NeuroMLDocument')
def exportChildren(self, outfile, level, namespace_='', name_='NeuroMLDocument', fromsubclass_=False, pretty_print=True):
super(NeuroMLDocument, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for include_ in self.include:
include_.export(outfile, level, namespace_, name_='include', pretty_print=pretty_print)
for extracellularProperties_ in self.extracellularProperties:
extracellularProperties_.export(outfile, level, namespace_, name_='extracellularProperties', pretty_print=pretty_print)
for intracellularProperties_ in self.intracellularProperties:
intracellularProperties_.export(outfile, level, namespace_, name_='intracellularProperties', pretty_print=pretty_print)
for morphology_ in self.morphology:
morphology_.export(outfile, level, namespace_, name_='morphology', pretty_print=pretty_print)
for ionChannel_ in self.ionChannel:
ionChannel_.export(outfile, level, namespace_, name_='ionChannel', pretty_print=pretty_print)
for decayingPoolConcentrationModel_ in self.decayingPoolConcentrationModel:
decayingPoolConcentrationModel_.export(outfile, level, namespace_, name_='decayingPoolConcentrationModel', pretty_print=pretty_print)
for expOneSynapse_ in self.expOneSynapse:
expOneSynapse_.export(outfile, level, namespace_, name_='expOneSynapse', pretty_print=pretty_print)
for expTwoSynapse_ in self.expTwoSynapse:
expTwoSynapse_.export(outfile, level, namespace_, name_='expTwoSynapse', pretty_print=pretty_print)
for blockingPlasticSynapse_ in self.blockingPlasticSynapse:
blockingPlasticSynapse_.export(outfile, level, namespace_, name_='blockingPlasticSynapse', pretty_print=pretty_print)
for biophysicalProperties_ in self.biophysicalProperties:
biophysicalProperties_.export(outfile, level, namespace_, name_='biophysicalProperties', pretty_print=pretty_print)
for cell_ in self.cell:
cell_.export(outfile, level, namespace_, name_='cell', pretty_print=pretty_print)
for baseCell_ in self.baseCell:
baseCell_.export(outfile, level, namespace_, name_='baseCell', pretty_print=pretty_print)
for iafTauCell_ in self.iafTauCell:
iafTauCell_.export(outfile, level, namespace_, name_='iafTauCell', pretty_print=pretty_print)
for iafTauRefCell_ in self.iafTauRefCell:
iafTauRefCell_.export(outfile, level, namespace_, name_='iafTauRefCell', pretty_print=pretty_print)
for iafCell_ in self.iafCell:
iafCell_.export(outfile, level, namespace_, name_='iafCell', pretty_print=pretty_print)
for iafRefCell_ in self.iafRefCell:
iafRefCell_.export(outfile, level, namespace_, name_='iafRefCell', pretty_print=pretty_print)
for izhikevichCell_ in self.izhikevichCell:
izhikevichCell_.export(outfile, level, namespace_, name_='izhikevichCell', pretty_print=pretty_print)
for adExIaFCell_ in self.adExIaFCell:
adExIaFCell_.export(outfile, level, namespace_, name_='adExIaFCell', pretty_print=pretty_print)
for pulseGenerator_ in self.pulseGenerator:
pulseGenerator_.export(outfile, level, namespace_, name_='pulseGenerator', pretty_print=pretty_print)
for sineGenerator_ in self.sineGenerator:
sineGenerator_.export(outfile, level, namespace_, name_='sineGenerator', pretty_print=pretty_print)
for rampGenerator_ in self.rampGenerator:
rampGenerator_.export(outfile, level, namespace_, name_='rampGenerator', pretty_print=pretty_print)
for voltageClamp_ in self.voltageClamp:
voltageClamp_.export(outfile, level, namespace_, name_='voltageClamp', pretty_print=pretty_print)
for spikeArray_ in self.spikeArray:
spikeArray_.export(outfile, level, namespace_, name_='spikeArray', pretty_print=pretty_print)
for spikeGenerator_ in self.spikeGenerator:
spikeGenerator_.export(outfile, level, namespace_, name_='spikeGenerator', pretty_print=pretty_print)
for spikeGeneratorRandom_ in self.spikeGeneratorRandom:
spikeGeneratorRandom_.export(outfile, level, namespace_, name_='spikeGeneratorRandom', pretty_print=pretty_print)
for spikeGeneratorPoisson_ in self.spikeGeneratorPoisson:
spikeGeneratorPoisson_.export(outfile, level, namespace_, name_='spikeGeneratorPoisson', pretty_print=pretty_print)
for IF_curr_alpha_ in self.IF_curr_alpha:
IF_curr_alpha_.export(outfile, level, namespace_, name_='IF_curr_alpha', pretty_print=pretty_print)
for IF_curr_exp_ in self.IF_curr_exp:
IF_curr_exp_.export(outfile, level, namespace_, name_='IF_curr_exp', pretty_print=pretty_print)
for IF_cond_alpha_ in self.IF_cond_alpha:
IF_cond_alpha_.export(outfile, level, namespace_, name_='IF_cond_alpha', pretty_print=pretty_print)
for IF_cond_exp_ in self.IF_cond_exp:
IF_cond_exp_.export(outfile, level, namespace_, name_='IF_cond_exp', pretty_print=pretty_print)
for EIF_cond_exp_isfa_ista_ in self.EIF_cond_exp_isfa_ista:
EIF_cond_exp_isfa_ista_.export(outfile, level, namespace_, name_='EIF_cond_exp_isfa_ista', pretty_print=pretty_print)
for EIF_cond_alpha_isfa_ista_ in self.EIF_cond_alpha_isfa_ista:
EIF_cond_alpha_isfa_ista_.export(outfile, level, namespace_, name_='EIF_cond_alpha_isfa_ista', pretty_print=pretty_print)
for HH_cond_exp_ in self.HH_cond_exp:
HH_cond_exp_.export(outfile, level, namespace_, name_='HH_cond_exp', pretty_print=pretty_print)
for expCondSynapse_ in self.expCondSynapse:
expCondSynapse_.export(outfile, level, namespace_, name_='expCondSynapse', pretty_print=pretty_print)
for alphaCondSynapse_ in self.alphaCondSynapse:
alphaCondSynapse_.export(outfile, level, namespace_, name_='alphaCondSynapse', pretty_print=pretty_print)
for expCurrSynapse_ in self.expCurrSynapse:
expCurrSynapse_.export(outfile, level, namespace_, name_='expCurrSynapse', pretty_print=pretty_print)
for alphaCurrSynapse_ in self.alphaCurrSynapse:
alphaCurrSynapse_.export(outfile, level, namespace_, name_='alphaCurrSynapse', pretty_print=pretty_print)
for SpikeSourcePoisson_ in self.SpikeSourcePoisson:
SpikeSourcePoisson_.export(outfile, level, namespace_, name_='SpikeSourcePoisson', pretty_print=pretty_print)
for network_ in self.network:
network_.export(outfile, level, namespace_, name_='network', pretty_print=pretty_print)
for ComponentType_ in self.ComponentType:
ComponentType_.export(outfile, level, namespace_, name_='ComponentType', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='NeuroMLDocument'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(NeuroMLDocument, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(NeuroMLDocument, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('include=[\n')
level += 1
for include_ in self.include:
showIndent(outfile, level)
outfile.write('model_.IncludeType(\n')
include_.exportLiteral(outfile, level, name_='IncludeType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('extracellularProperties=[\n')
level += 1
for extracellularProperties_ in self.extracellularProperties:
showIndent(outfile, level)
outfile.write('model_.ExtracellularProperties(\n')
extracellularProperties_.exportLiteral(outfile, level, name_='ExtracellularProperties')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('intracellularProperties=[\n')
level += 1
for intracellularProperties_ in self.intracellularProperties:
showIndent(outfile, level)
outfile.write('model_.IntracellularProperties(\n')
intracellularProperties_.exportLiteral(outfile, level, name_='IntracellularProperties')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('morphology=[\n')
level += 1
for morphology_ in self.morphology:
showIndent(outfile, level)
outfile.write('model_.Morphology(\n')
morphology_.exportLiteral(outfile, level, name_='Morphology')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('ionChannel=[\n')
level += 1
for ionChannel_ in self.ionChannel:
showIndent(outfile, level)
outfile.write('model_.IonChannel(\n')
ionChannel_.exportLiteral(outfile, level, name_='IonChannel')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('decayingPoolConcentrationModel=[\n')
level += 1
for decayingPoolConcentrationModel_ in self.decayingPoolConcentrationModel:
showIndent(outfile, level)
outfile.write('model_.DecayingPoolConcentrationModel(\n')
decayingPoolConcentrationModel_.exportLiteral(outfile, level, name_='DecayingPoolConcentrationModel')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('expOneSynapse=[\n')
level += 1
for expOneSynapse_ in self.expOneSynapse:
showIndent(outfile, level)
outfile.write('model_.ExpOneSynapse(\n')
expOneSynapse_.exportLiteral(outfile, level, name_='ExpOneSynapse')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('expTwoSynapse=[\n')
level += 1
for expTwoSynapse_ in self.expTwoSynapse:
showIndent(outfile, level)
outfile.write('model_.ExpTwoSynapse(\n')
expTwoSynapse_.exportLiteral(outfile, level, name_='ExpTwoSynapse')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('blockingPlasticSynapse=[\n')
level += 1
for blockingPlasticSynapse_ in self.blockingPlasticSynapse:
showIndent(outfile, level)
outfile.write('model_.BlockingPlasticSynapse(\n')
blockingPlasticSynapse_.exportLiteral(outfile, level, name_='BlockingPlasticSynapse')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('biophysicalProperties=[\n')
level += 1
for biophysicalProperties_ in self.biophysicalProperties:
showIndent(outfile, level)
outfile.write('model_.BiophysicalProperties(\n')
biophysicalProperties_.exportLiteral(outfile, level, name_='BiophysicalProperties')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('cell=[\n')
level += 1
for cell_ in self.cell:
showIndent(outfile, level)
outfile.write('model_.Cell(\n')
cell_.exportLiteral(outfile, level, name_='Cell')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('baseCell=[\n')
level += 1
for baseCell_ in self.baseCell:
showIndent(outfile, level)
outfile.write('model_.BaseCell(\n')
baseCell_.exportLiteral(outfile, level, name_='BaseCell')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('iafTauCell=[\n')
level += 1
for iafTauCell_ in self.iafTauCell:
showIndent(outfile, level)
outfile.write('model_.IaFTauCell(\n')
iafTauCell_.exportLiteral(outfile, level, name_='IaFTauCell')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('iafTauRefCell=[\n')
level += 1
for iafTauRefCell_ in self.iafTauRefCell:
showIndent(outfile, level)
outfile.write('model_.IaFTauRefCell(\n')
iafTauRefCell_.exportLiteral(outfile, level, name_='IaFTauRefCell')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('iafCell=[\n')
level += 1
for iafCell_ in self.iafCell:
showIndent(outfile, level)
outfile.write('model_.IaFCell(\n')
iafCell_.exportLiteral(outfile, level, name_='IaFCell')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('iafRefCell=[\n')
level += 1
for iafRefCell_ in self.iafRefCell:
showIndent(outfile, level)
outfile.write('model_.IaFRefCell(\n')
iafRefCell_.exportLiteral(outfile, level, name_='IaFRefCell')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('izhikevichCell=[\n')
level += 1
for izhikevichCell_ in self.izhikevichCell:
showIndent(outfile, level)
outfile.write('model_.IzhikevichCell(\n')
izhikevichCell_.exportLiteral(outfile, level, name_='IzhikevichCell')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('adExIaFCell=[\n')
level += 1
for adExIaFCell_ in self.adExIaFCell:
showIndent(outfile, level)
outfile.write('model_.AdExIaFCell(\n')
adExIaFCell_.exportLiteral(outfile, level, name_='AdExIaFCell')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('pulseGenerator=[\n')
level += 1
for pulseGenerator_ in self.pulseGenerator:
showIndent(outfile, level)
outfile.write('model_.PulseGenerator(\n')
pulseGenerator_.exportLiteral(outfile, level, name_='PulseGenerator')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('sineGenerator=[\n')
level += 1
for sineGenerator_ in self.sineGenerator:
showIndent(outfile, level)
outfile.write('model_.SineGenerator(\n')
sineGenerator_.exportLiteral(outfile, level, name_='SineGenerator')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('rampGenerator=[\n')
level += 1
for rampGenerator_ in self.rampGenerator:
showIndent(outfile, level)
outfile.write('model_.RampGenerator(\n')
rampGenerator_.exportLiteral(outfile, level, name_='RampGenerator')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('voltageClamp=[\n')
level += 1
for voltageClamp_ in self.voltageClamp:
showIndent(outfile, level)
outfile.write('model_.VoltageClamp(\n')
voltageClamp_.exportLiteral(outfile, level, name_='VoltageClamp')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('spikeArray=[\n')
level += 1
for spikeArray_ in self.spikeArray:
showIndent(outfile, level)
outfile.write('model_.SpikeArray(\n')
spikeArray_.exportLiteral(outfile, level, name_='SpikeArray')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('spikeGenerator=[\n')
level += 1
for spikeGenerator_ in self.spikeGenerator:
showIndent(outfile, level)
outfile.write('model_.SpikeGenerator(\n')
spikeGenerator_.exportLiteral(outfile, level, name_='SpikeGenerator')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('spikeGeneratorRandom=[\n')
level += 1
for spikeGeneratorRandom_ in self.spikeGeneratorRandom:
showIndent(outfile, level)
outfile.write('model_.SpikeGeneratorRandom(\n')
spikeGeneratorRandom_.exportLiteral(outfile, level, name_='SpikeGeneratorRandom')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('spikeGeneratorPoisson=[\n')
level += 1
for spikeGeneratorPoisson_ in self.spikeGeneratorPoisson:
showIndent(outfile, level)
outfile.write('model_.SpikeGeneratorPoisson(\n')
spikeGeneratorPoisson_.exportLiteral(outfile, level, name_='SpikeGeneratorPoisson')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('IF_curr_alpha=[\n')
level += 1
for IF_curr_alpha_ in self.IF_curr_alpha:
showIndent(outfile, level)
outfile.write('model_.IF_curr_alpha(\n')
IF_curr_alpha_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('IF_curr_exp=[\n')
level += 1
for IF_curr_exp_ in self.IF_curr_exp:
showIndent(outfile, level)
outfile.write('model_.IF_curr_exp(\n')
IF_curr_exp_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('IF_cond_alpha=[\n')
level += 1
for IF_cond_alpha_ in self.IF_cond_alpha:
showIndent(outfile, level)
outfile.write('model_.IF_cond_alpha(\n')
IF_cond_alpha_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('IF_cond_exp=[\n')
level += 1
for IF_cond_exp_ in self.IF_cond_exp:
showIndent(outfile, level)
outfile.write('model_.IF_cond_exp(\n')
IF_cond_exp_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('EIF_cond_exp_isfa_ista=[\n')
level += 1
for EIF_cond_exp_isfa_ista_ in self.EIF_cond_exp_isfa_ista:
showIndent(outfile, level)
outfile.write('model_.EIF_cond_exp_isfa_ista(\n')
EIF_cond_exp_isfa_ista_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('EIF_cond_alpha_isfa_ista=[\n')
level += 1
for EIF_cond_alpha_isfa_ista_ in self.EIF_cond_alpha_isfa_ista:
showIndent(outfile, level)
outfile.write('model_.EIF_cond_alpha_isfa_ista(\n')
EIF_cond_alpha_isfa_ista_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('HH_cond_exp=[\n')
level += 1
for HH_cond_exp_ in self.HH_cond_exp:
showIndent(outfile, level)
outfile.write('model_.HH_cond_exp(\n')
HH_cond_exp_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('expCondSynapse=[\n')
level += 1
for expCondSynapse_ in self.expCondSynapse:
showIndent(outfile, level)
outfile.write('model_.ExpCondSynapse(\n')
expCondSynapse_.exportLiteral(outfile, level, name_='ExpCondSynapse')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('alphaCondSynapse=[\n')
level += 1
for alphaCondSynapse_ in self.alphaCondSynapse:
showIndent(outfile, level)
outfile.write('model_.AlphaCondSynapse(\n')
alphaCondSynapse_.exportLiteral(outfile, level, name_='AlphaCondSynapse')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('expCurrSynapse=[\n')
level += 1
for expCurrSynapse_ in self.expCurrSynapse:
showIndent(outfile, level)
outfile.write('model_.ExpCurrSynapse(\n')
expCurrSynapse_.exportLiteral(outfile, level, name_='ExpCurrSynapse')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('alphaCurrSynapse=[\n')
level += 1
for alphaCurrSynapse_ in self.alphaCurrSynapse:
showIndent(outfile, level)
outfile.write('model_.AlphaCurrSynapse(\n')
alphaCurrSynapse_.exportLiteral(outfile, level, name_='AlphaCurrSynapse')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('SpikeSourcePoisson=[\n')
level += 1
for SpikeSourcePoisson_ in self.SpikeSourcePoisson:
showIndent(outfile, level)
outfile.write('model_.SpikeSourcePoisson(\n')
SpikeSourcePoisson_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('network=[\n')
level += 1
for network_ in self.network:
showIndent(outfile, level)
outfile.write('model_.Network(\n')
network_.exportLiteral(outfile, level, name_='Network')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('ComponentType=[\n')
level += 1
for ComponentType_ in self.ComponentType:
showIndent(outfile, level)
outfile.write('model_.ComponentType(\n')
ComponentType_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(NeuroMLDocument, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'include':
obj_ = IncludeType.factory()
obj_.build(child_)
self.include.append(obj_)
elif nodeName_ == 'extracellularProperties':
obj_ = ExtracellularProperties.factory()
obj_.build(child_)
self.extracellularProperties.append(obj_)
elif nodeName_ == 'intracellularProperties':
obj_ = IntracellularProperties.factory()
obj_.build(child_)
self.intracellularProperties.append(obj_)
elif nodeName_ == 'morphology':
obj_ = Morphology.factory()
obj_.build(child_)
self.morphology.append(obj_)
elif nodeName_ == 'ionChannel':
obj_ = IonChannel.factory()
obj_.build(child_)
self.ionChannel.append(obj_)
elif nodeName_ == 'decayingPoolConcentrationModel':
class_obj_ = self.get_class_obj_(child_, DecayingPoolConcentrationModel)
obj_ = class_obj_.factory()
obj_.build(child_)
self.decayingPoolConcentrationModel.append(obj_)
elif nodeName_ == 'expOneSynapse':
obj_ = ExpOneSynapse.factory()
obj_.build(child_)
self.expOneSynapse.append(obj_)
elif nodeName_ == 'expTwoSynapse':
class_obj_ = self.get_class_obj_(child_, ExpTwoSynapse)
obj_ = class_obj_.factory()
obj_.build(child_)
self.expTwoSynapse.append(obj_)
elif nodeName_ == 'blockingPlasticSynapse':
obj_ = BlockingPlasticSynapse.factory()
obj_.build(child_)
self.blockingPlasticSynapse.append(obj_)
elif nodeName_ == 'biophysicalProperties':
obj_ = BiophysicalProperties.factory()
obj_.build(child_)
self.biophysicalProperties.append(obj_)
elif nodeName_ == 'cell':
obj_ = Cell.factory()
obj_.build(child_)
self.cell.append(obj_)
elif nodeName_ == 'baseCell':
class_obj_ = self.get_class_obj_(child_, BaseCell)
obj_ = class_obj_.factory()
obj_.build(child_)
self.baseCell.append(obj_)
elif nodeName_ == 'iafTauCell':
class_obj_ = self.get_class_obj_(child_, IaFTauCell)
obj_ = class_obj_.factory()
obj_.build(child_)
self.iafTauCell.append(obj_)
elif nodeName_ == 'iafTauRefCell':
obj_ = IaFTauRefCell.factory()
obj_.build(child_)
self.iafTauRefCell.append(obj_)
elif nodeName_ == 'iafCell':
class_obj_ = self.get_class_obj_(child_, IaFCell)
obj_ = class_obj_.factory()
obj_.build(child_)
self.iafCell.append(obj_)
elif nodeName_ == 'iafRefCell':
obj_ = IaFRefCell.factory()
obj_.build(child_)
self.iafRefCell.append(obj_)
elif nodeName_ == 'izhikevichCell':
obj_ = IzhikevichCell.factory()
obj_.build(child_)
self.izhikevichCell.append(obj_)
elif nodeName_ == 'adExIaFCell':
obj_ = AdExIaFCell.factory()
obj_.build(child_)
self.adExIaFCell.append(obj_)
elif nodeName_ == 'pulseGenerator':
obj_ = PulseGenerator.factory()
obj_.build(child_)
self.pulseGenerator.append(obj_)
elif nodeName_ == 'sineGenerator':
obj_ = SineGenerator.factory()
obj_.build(child_)
self.sineGenerator.append(obj_)
elif nodeName_ == 'rampGenerator':
obj_ = RampGenerator.factory()
obj_.build(child_)
self.rampGenerator.append(obj_)
elif nodeName_ == 'voltageClamp':
obj_ = VoltageClamp.factory()
obj_.build(child_)
self.voltageClamp.append(obj_)
elif nodeName_ == 'spikeArray':
obj_ = SpikeArray.factory()
obj_.build(child_)
self.spikeArray.append(obj_)
elif nodeName_ == 'spikeGenerator':
obj_ = SpikeGenerator.factory()
obj_.build(child_)
self.spikeGenerator.append(obj_)
elif nodeName_ == 'spikeGeneratorRandom':
obj_ = SpikeGeneratorRandom.factory()
obj_.build(child_)
self.spikeGeneratorRandom.append(obj_)
elif nodeName_ == 'spikeGeneratorPoisson':
obj_ = SpikeGeneratorPoisson.factory()
obj_.build(child_)
self.spikeGeneratorPoisson.append(obj_)
elif nodeName_ == 'IF_curr_alpha':
obj_ = IF_curr_alpha.factory()
obj_.build(child_)
self.IF_curr_alpha.append(obj_)
elif nodeName_ == 'IF_curr_exp':
obj_ = IF_curr_exp.factory()
obj_.build(child_)
self.IF_curr_exp.append(obj_)
elif nodeName_ == 'IF_cond_alpha':
obj_ = IF_cond_alpha.factory()
obj_.build(child_)
self.IF_cond_alpha.append(obj_)
elif nodeName_ == 'IF_cond_exp':
obj_ = IF_cond_exp.factory()
obj_.build(child_)
self.IF_cond_exp.append(obj_)
elif nodeName_ == 'EIF_cond_exp_isfa_ista':
obj_ = EIF_cond_exp_isfa_ista.factory()
obj_.build(child_)
self.EIF_cond_exp_isfa_ista.append(obj_)
elif nodeName_ == 'EIF_cond_alpha_isfa_ista':
obj_ = EIF_cond_alpha_isfa_ista.factory()
obj_.build(child_)
self.EIF_cond_alpha_isfa_ista.append(obj_)
elif nodeName_ == 'HH_cond_exp':
obj_ = HH_cond_exp.factory()
obj_.build(child_)
self.HH_cond_exp.append(obj_)
elif nodeName_ == 'expCondSynapse':
obj_ = ExpCondSynapse.factory()
obj_.build(child_)
self.expCondSynapse.append(obj_)
elif nodeName_ == 'alphaCondSynapse':
obj_ = AlphaCondSynapse.factory()
obj_.build(child_)
self.alphaCondSynapse.append(obj_)
elif nodeName_ == 'expCurrSynapse':
obj_ = ExpCurrSynapse.factory()
obj_.build(child_)
self.expCurrSynapse.append(obj_)
elif nodeName_ == 'alphaCurrSynapse':
obj_ = AlphaCurrSynapse.factory()
obj_.build(child_)
self.alphaCurrSynapse.append(obj_)
elif nodeName_ == 'SpikeSourcePoisson':
obj_ = SpikeSourcePoisson.factory()
obj_.build(child_)
self.SpikeSourcePoisson.append(obj_)
elif nodeName_ == 'network':
obj_ = Network.factory()
obj_.build(child_)
self.network.append(obj_)
elif nodeName_ == 'ComponentType':
obj_ = ComponentType.factory()
obj_.build(child_)
self.ComponentType.append(obj_)
super(NeuroMLDocument, self).buildChildren(child_, node, nodeName_, True)
# end class NeuroMLDocument
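# Illustrative sketch, not part of the generateDS output: one hypothetical way
# to populate a NeuroMLDocument from an XML string. The helper name and sample
# usage are assumptions for the example; buildChildren() above dispatches on
# each child's tag name (e.g. an <izhikevichCell> element is appended to
# document.izhikevichCell).
def _example_parse_document(xml_text):
    from xml.etree import ElementTree as etree
    node = etree.fromstring(xml_text)
    document = NeuroMLDocument()
    # build() reads the attributes of the root element and then routes every
    # child element through buildChildren() into the matching list attribute.
    document.build(node)
    return document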
class BasePynnSynapse(BaseSynapse):
subclass = None
superclass = BaseSynapse
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn=None, extensiontype_=None):
super(BasePynnSynapse, self).__init__(id, neuroLexId, name, metaid, notes, annotation, extensiontype_, )
self.tau_syn = _cast(float, tau_syn)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if BasePynnSynapse.subclass:
return BasePynnSynapse.subclass(*args_, **kwargs_)
else:
return BasePynnSynapse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tau_syn(self): return self.tau_syn
def set_tau_syn(self, tau_syn): self.tau_syn = tau_syn
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(BasePynnSynapse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='BasePynnSynapse', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='BasePynnSynapse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BasePynnSynapse'):
super(BasePynnSynapse, self).exportAttributes(outfile, level, already_processed, namespace_, name_='BasePynnSynapse')
if self.tau_syn is not None and 'tau_syn' not in already_processed:
already_processed.add('tau_syn')
outfile.write(' tau_syn="%s"' % self.gds_format_double(self.tau_syn, input_name='tau_syn'))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='BasePynnSynapse', fromsubclass_=False, pretty_print=True):
super(BasePynnSynapse, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='BasePynnSynapse'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tau_syn is not None and 'tau_syn' not in already_processed:
already_processed.add('tau_syn')
showIndent(outfile, level)
outfile.write('tau_syn=%e,\n' % (self.tau_syn,))
super(BasePynnSynapse, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(BasePynnSynapse, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tau_syn', node)
if value is not None and 'tau_syn' not in already_processed:
already_processed.add('tau_syn')
try:
self.tau_syn = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (tau_syn): %s' % exp)
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(BasePynnSynapse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(BasePynnSynapse, self).buildChildren(child_, node, nodeName_, True)
pass
# end class BasePynnSynapse
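# Illustrative sketch, not part of the generateDS output: factory() is the
# hook the parser uses to create instances, so assigning a class to
# BasePynnSynapse.subclass (not done here) would make every such element build
# into that subclass instead. The id and tau_syn values below are made up.
def _example_pynn_synapse_factory():
    syn = BasePynnSynapse.factory(id='syn0', tau_syn=5.0)
    return syn.get_tau_syn()  # 5.0, cast to float by _cast() in __init__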
class basePyNNCell(BaseCell):
subclass = None
superclass = BaseCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn_I=None, tau_syn_E=None, i_offset=None, cm=None, v_init=None, extensiontype_=None):
super(basePyNNCell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, extensiontype_, )
self.tau_syn_I = _cast(float, tau_syn_I)
self.tau_syn_E = _cast(float, tau_syn_E)
self.i_offset = _cast(float, i_offset)
self.cm = _cast(float, cm)
self.v_init = _cast(float, v_init)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if basePyNNCell.subclass:
return basePyNNCell.subclass(*args_, **kwargs_)
else:
return basePyNNCell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tau_syn_I(self): return self.tau_syn_I
def set_tau_syn_I(self, tau_syn_I): self.tau_syn_I = tau_syn_I
def get_tau_syn_E(self): return self.tau_syn_E
def set_tau_syn_E(self, tau_syn_E): self.tau_syn_E = tau_syn_E
def get_i_offset(self): return self.i_offset
def set_i_offset(self, i_offset): self.i_offset = i_offset
def get_cm(self): return self.cm
def set_cm(self, cm): self.cm = cm
def get_v_init(self): return self.v_init
def set_v_init(self, v_init): self.v_init = v_init
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(basePyNNCell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='basePyNNCell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='basePyNNCell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='basePyNNCell'):
super(basePyNNCell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='basePyNNCell')
if self.tau_syn_I is not None and 'tau_syn_I' not in already_processed:
already_processed.add('tau_syn_I')
outfile.write(' tau_syn_I="%s"' % self.gds_format_double(self.tau_syn_I, input_name='tau_syn_I'))
if self.tau_syn_E is not None and 'tau_syn_E' not in already_processed:
already_processed.add('tau_syn_E')
outfile.write(' tau_syn_E="%s"' % self.gds_format_double(self.tau_syn_E, input_name='tau_syn_E'))
if self.i_offset is not None and 'i_offset' not in already_processed:
already_processed.add('i_offset')
outfile.write(' i_offset="%s"' % self.gds_format_double(self.i_offset, input_name='i_offset'))
if self.cm is not None and 'cm' not in already_processed:
already_processed.add('cm')
outfile.write(' cm="%s"' % self.gds_format_double(self.cm, input_name='cm'))
if self.v_init is not None and 'v_init' not in already_processed:
already_processed.add('v_init')
outfile.write(' v_init="%s"' % self.gds_format_double(self.v_init, input_name='v_init'))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='basePyNNCell', fromsubclass_=False, pretty_print=True):
super(basePyNNCell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='basePyNNCell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tau_syn_I is not None and 'tau_syn_I' not in already_processed:
already_processed.add('tau_syn_I')
showIndent(outfile, level)
outfile.write('tau_syn_I=%e,\n' % (self.tau_syn_I,))
if self.tau_syn_E is not None and 'tau_syn_E' not in already_processed:
already_processed.add('tau_syn_E')
showIndent(outfile, level)
outfile.write('tau_syn_E=%e,\n' % (self.tau_syn_E,))
if self.i_offset is not None and 'i_offset' not in already_processed:
already_processed.add('i_offset')
showIndent(outfile, level)
outfile.write('i_offset=%e,\n' % (self.i_offset,))
if self.cm is not None and 'cm' not in already_processed:
already_processed.add('cm')
showIndent(outfile, level)
outfile.write('cm=%e,\n' % (self.cm,))
if self.v_init is not None and 'v_init' not in already_processed:
already_processed.add('v_init')
showIndent(outfile, level)
outfile.write('v_init=%e,\n' % (self.v_init,))
super(basePyNNCell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(basePyNNCell, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tau_syn_I', node)
if value is not None and 'tau_syn_I' not in already_processed:
already_processed.add('tau_syn_I')
try:
self.tau_syn_I = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (tau_syn_I): %s' % exp)
value = find_attr_value_('tau_syn_E', node)
if value is not None and 'tau_syn_E' not in already_processed:
already_processed.add('tau_syn_E')
try:
self.tau_syn_E = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (tau_syn_E): %s' % exp)
value = find_attr_value_('i_offset', node)
if value is not None and 'i_offset' not in already_processed:
already_processed.add('i_offset')
try:
self.i_offset = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (i_offset): %s' % exp)
value = find_attr_value_('cm', node)
if value is not None and 'cm' not in already_processed:
already_processed.add('cm')
try:
self.cm = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (cm): %s' % exp)
value = find_attr_value_('v_init', node)
if value is not None and 'v_init' not in already_processed:
already_processed.add('v_init')
try:
self.v_init = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (v_init): %s' % exp)
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(basePyNNCell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(basePyNNCell, self).buildChildren(child_, node, nodeName_, True)
pass
# end class basePyNNCell
class ConcentrationModel_D(DecayingPoolConcentrationModel):
subclass = None
superclass = DecayingPoolConcentrationModel
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, ion=None, shellThickness=None, restingConc=None, decayConstant=None, type_=None):
super(ConcentrationModel_D, self).__init__(id, neuroLexId, name, metaid, notes, annotation, ion, shellThickness, restingConc, decayConstant, )
self.type_ = _cast(None, type_)
pass
def factory(*args_, **kwargs_):
if ConcentrationModel_D.subclass:
return ConcentrationModel_D.subclass(*args_, **kwargs_)
else:
return ConcentrationModel_D(*args_, **kwargs_)
factory = staticmethod(factory)
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def hasContent_(self):
if (
super(ConcentrationModel_D, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ConcentrationModel_D', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ConcentrationModel_D')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ConcentrationModel_D'):
super(ConcentrationModel_D, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ConcentrationModel_D')
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
def exportChildren(self, outfile, level, namespace_='', name_='ConcentrationModel_D', fromsubclass_=False, pretty_print=True):
super(ConcentrationModel_D, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ConcentrationModel_D'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
super(ConcentrationModel_D, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ConcentrationModel_D, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
super(ConcentrationModel_D, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(ConcentrationModel_D, self).buildChildren(child_, node, nodeName_, True)
pass
# end class ConcentrationModel_D
class Cell(BaseCell):
"""Should only be used if morphology element is outside the cell. This
points to the id of the morphology Should only be used if
biophysicalProperties element is outside the cell. This points
to the id of the biophysicalProperties"""
subclass = None
superclass = BaseCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, biophysicalProperties_attr=None, morphology_attr=None, morphology=None, biophysicalProperties=None):
super(Cell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.biophysicalProperties_attr = _cast(None, biophysicalProperties_attr)
self.morphology_attr = _cast(None, morphology_attr)
self.morphology = morphology
self.biophysicalProperties = biophysicalProperties
def factory(*args_, **kwargs_):
if Cell.subclass:
return Cell.subclass(*args_, **kwargs_)
else:
return Cell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_morphology(self): return self.morphology
def set_morphology(self, morphology): self.morphology = morphology
def get_biophysicalProperties(self): return self.biophysicalProperties
def set_biophysicalProperties(self, biophysicalProperties): self.biophysicalProperties = biophysicalProperties
def get_biophysicalProperties_attr(self): return self.biophysicalProperties_attr
def set_biophysicalProperties_attr(self, biophysicalProperties_attr): self.biophysicalProperties_attr = biophysicalProperties_attr
def get_morphology_attr(self): return self.morphology_attr
def set_morphology_attr(self, morphology_attr): self.morphology_attr = morphology_attr
def hasContent_(self):
if (
self.morphology is not None or
self.biophysicalProperties is not None or
super(Cell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Cell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Cell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Cell'):
super(Cell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Cell')
if self.biophysicalProperties_attr is not None and 'biophysicalProperties_attr' not in already_processed:
already_processed.add('biophysicalProperties_attr')
outfile.write(' biophysicalProperties=%s' % (self.gds_format_string(quote_attrib(self.biophysicalProperties_attr).encode(ExternalEncoding), input_name='biophysicalProperties_attr'), ))
if self.morphology_attr is not None and 'morphology_attr' not in already_processed:
already_processed.add('morphology_attr')
outfile.write(' morphology=%s' % (self.gds_format_string(quote_attrib(self.morphology_attr).encode(ExternalEncoding), input_name='morphology_attr'), ))
def exportChildren(self, outfile, level, namespace_='', name_='Cell', fromsubclass_=False, pretty_print=True):
super(Cell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.morphology is not None:
self.morphology.export(outfile, level, namespace_, name_='morphology', pretty_print=pretty_print)
if self.biophysicalProperties is not None:
self.biophysicalProperties.export(outfile, level, namespace_, name_='biophysicalProperties', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='Cell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.biophysicalProperties_attr is not None and 'biophysicalProperties_attr' not in already_processed:
already_processed.add('biophysicalProperties_attr')
showIndent(outfile, level)
outfile.write('biophysicalProperties_attr="%s",\n' % (self.biophysicalProperties_attr,))
if self.morphology_attr is not None and 'morphology_attr' not in already_processed:
already_processed.add('morphology_attr')
showIndent(outfile, level)
outfile.write('morphology_attr="%s",\n' % (self.morphology_attr,))
super(Cell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Cell, self).exportLiteralChildren(outfile, level, name_)
if self.morphology is not None:
showIndent(outfile, level)
outfile.write('morphology=model_.Morphology(\n')
self.morphology.exportLiteral(outfile, level, name_='morphology')
showIndent(outfile, level)
outfile.write('),\n')
if self.biophysicalProperties is not None:
showIndent(outfile, level)
outfile.write('biophysicalProperties=model_.BiophysicalProperties(\n')
self.biophysicalProperties.exportLiteral(outfile, level, name_='biophysicalProperties')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('biophysicalProperties', node)
if value is not None and 'biophysicalProperties_attr' not in already_processed:
already_processed.add('biophysicalProperties_attr')
self.biophysicalProperties_attr = value
value = find_attr_value_('morphology', node)
if value is not None and 'morphology_attr' not in already_processed:
already_processed.add('morphology_attr')
self.morphology_attr = value
super(Cell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'morphology':
obj_ = Morphology.factory()
obj_.build(child_)
self.set_morphology(obj_)
elif nodeName_ == 'biophysicalProperties':
obj_ = BiophysicalProperties.factory()
obj_.build(child_)
self.set_biophysicalProperties(obj_)
super(Cell, self).buildChildren(child_, node, nodeName_, True)
# end class Cell
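# Illustrative sketch, not part of the generateDS output: a Cell that refers
# to a morphology and a biophysicalProperties element defined elsewhere in the
# document by id, as described in the Cell docstring. The ids used here are
# hypothetical.
def _example_external_cell(outfile):
    cell = Cell(id='cell0',
                morphology_attr='morph0',
                biophysicalProperties_attr='biophys0')
    # exportAttributes() writes these *_attr values as the XML attributes
    # morphology="morph0" and biophysicalProperties="biophys0".
    cell.export(outfile, 0, name_='cell')
    return cell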
class AdExIaFCell(BaseCell):
subclass = None
superclass = BaseCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, reset=None, EL=None, C=None, b=None, refract=None, VT=None, delT=None, a=None, thresh=None, gL=None, tauw=None):
super(AdExIaFCell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.reset = _cast(None, reset)
self.EL = _cast(None, EL)
self.C = _cast(None, C)
self.b = _cast(None, b)
self.refract = _cast(None, refract)
self.VT = _cast(None, VT)
self.delT = _cast(None, delT)
self.a = _cast(None, a)
self.thresh = _cast(None, thresh)
self.gL = _cast(None, gL)
self.tauw = _cast(None, tauw)
pass
def factory(*args_, **kwargs_):
if AdExIaFCell.subclass:
return AdExIaFCell.subclass(*args_, **kwargs_)
else:
return AdExIaFCell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_reset(self): return self.reset
def set_reset(self, reset): self.reset = reset
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def get_EL(self): return self.EL
def set_EL(self, EL): self.EL = EL
def get_C(self): return self.C
def set_C(self, C): self.C = C
def validate_Nml2Quantity_capacitance(self, value):
# Validate type Nml2Quantity_capacitance, a restriction on xs:string.
pass
def get_b(self): return self.b
def set_b(self, b): self.b = b
def validate_Nml2Quantity_current(self, value):
# Validate type Nml2Quantity_current, a restriction on xs:string.
pass
def get_refract(self): return self.refract
def set_refract(self, refract): self.refract = refract
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_VT(self): return self.VT
def set_VT(self, VT): self.VT = VT
def get_delT(self): return self.delT
def set_delT(self, delT): self.delT = delT
def get_a(self): return self.a
def set_a(self, a): self.a = a
def validate_Nml2Quantity_conductance(self, value):
# Validate type Nml2Quantity_conductance, a restriction on xs:string.
pass
def get_thresh(self): return self.thresh
def set_thresh(self, thresh): self.thresh = thresh
def get_gL(self): return self.gL
def set_gL(self, gL): self.gL = gL
def get_tauw(self): return self.tauw
def set_tauw(self, tauw): self.tauw = tauw
def hasContent_(self):
if (
super(AdExIaFCell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='AdExIaFCell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AdExIaFCell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AdExIaFCell'):
super(AdExIaFCell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AdExIaFCell')
if self.reset is not None and 'reset' not in already_processed:
already_processed.add('reset')
outfile.write(' reset=%s' % (quote_attrib(self.reset), ))
if self.EL is not None and 'EL' not in already_processed:
already_processed.add('EL')
outfile.write(' EL=%s' % (quote_attrib(self.EL), ))
if self.C is not None and 'C' not in already_processed:
already_processed.add('C')
outfile.write(' C=%s' % (quote_attrib(self.C), ))
if self.b is not None and 'b' not in already_processed:
already_processed.add('b')
outfile.write(' b=%s' % (quote_attrib(self.b), ))
if self.refract is not None and 'refract' not in already_processed:
already_processed.add('refract')
outfile.write(' refract=%s' % (quote_attrib(self.refract), ))
if self.VT is not None and 'VT' not in already_processed:
already_processed.add('VT')
outfile.write(' VT=%s' % (quote_attrib(self.VT), ))
if self.delT is not None and 'delT' not in already_processed:
already_processed.add('delT')
outfile.write(' delT=%s' % (quote_attrib(self.delT), ))
if self.a is not None and 'a' not in already_processed:
already_processed.add('a')
outfile.write(' a=%s' % (quote_attrib(self.a), ))
if self.thresh is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
outfile.write(' thresh=%s' % (quote_attrib(self.thresh), ))
if self.gL is not None and 'gL' not in already_processed:
already_processed.add('gL')
outfile.write(' gL=%s' % (quote_attrib(self.gL), ))
if self.tauw is not None and 'tauw' not in already_processed:
already_processed.add('tauw')
outfile.write(' tauw=%s' % (quote_attrib(self.tauw), ))
def exportChildren(self, outfile, level, namespace_='', name_='AdExIaFCell', fromsubclass_=False, pretty_print=True):
super(AdExIaFCell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='AdExIaFCell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.reset is not None and 'reset' not in already_processed:
already_processed.add('reset')
showIndent(outfile, level)
outfile.write('reset="%s",\n' % (self.reset,))
if self.EL is not None and 'EL' not in already_processed:
already_processed.add('EL')
showIndent(outfile, level)
outfile.write('EL="%s",\n' % (self.EL,))
if self.C is not None and 'C' not in already_processed:
already_processed.add('C')
showIndent(outfile, level)
outfile.write('C="%s",\n' % (self.C,))
if self.b is not None and 'b' not in already_processed:
already_processed.add('b')
showIndent(outfile, level)
outfile.write('b="%s",\n' % (self.b,))
if self.refract is not None and 'refract' not in already_processed:
already_processed.add('refract')
showIndent(outfile, level)
outfile.write('refract="%s",\n' % (self.refract,))
if self.VT is not None and 'VT' not in already_processed:
already_processed.add('VT')
showIndent(outfile, level)
outfile.write('VT="%s",\n' % (self.VT,))
if self.delT is not None and 'delT' not in already_processed:
already_processed.add('delT')
showIndent(outfile, level)
outfile.write('delT="%s",\n' % (self.delT,))
if self.a is not None and 'a' not in already_processed:
already_processed.add('a')
showIndent(outfile, level)
outfile.write('a="%s",\n' % (self.a,))
if self.thresh is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
showIndent(outfile, level)
outfile.write('thresh="%s",\n' % (self.thresh,))
if self.gL is not None and 'gL' not in already_processed:
already_processed.add('gL')
showIndent(outfile, level)
outfile.write('gL="%s",\n' % (self.gL,))
if self.tauw is not None and 'tauw' not in already_processed:
already_processed.add('tauw')
showIndent(outfile, level)
outfile.write('tauw="%s",\n' % (self.tauw,))
super(AdExIaFCell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AdExIaFCell, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('reset', node)
if value is not None and 'reset' not in already_processed:
already_processed.add('reset')
self.reset = value
self.validate_Nml2Quantity_voltage(self.reset) # validate type Nml2Quantity_voltage
value = find_attr_value_('EL', node)
if value is not None and 'EL' not in already_processed:
already_processed.add('EL')
self.EL = value
self.validate_Nml2Quantity_voltage(self.EL) # validate type Nml2Quantity_voltage
value = find_attr_value_('C', node)
if value is not None and 'C' not in already_processed:
already_processed.add('C')
self.C = value
self.validate_Nml2Quantity_capacitance(self.C) # validate type Nml2Quantity_capacitance
value = find_attr_value_('b', node)
if value is not None and 'b' not in already_processed:
already_processed.add('b')
self.b = value
self.validate_Nml2Quantity_current(self.b) # validate type Nml2Quantity_current
value = find_attr_value_('refract', node)
if value is not None and 'refract' not in already_processed:
already_processed.add('refract')
self.refract = value
self.validate_Nml2Quantity_time(self.refract) # validate type Nml2Quantity_time
value = find_attr_value_('VT', node)
if value is not None and 'VT' not in already_processed:
already_processed.add('VT')
self.VT = value
self.validate_Nml2Quantity_voltage(self.VT) # validate type Nml2Quantity_voltage
value = find_attr_value_('delT', node)
if value is not None and 'delT' not in already_processed:
already_processed.add('delT')
self.delT = value
self.validate_Nml2Quantity_voltage(self.delT) # validate type Nml2Quantity_voltage
value = find_attr_value_('a', node)
if value is not None and 'a' not in already_processed:
already_processed.add('a')
self.a = value
self.validate_Nml2Quantity_conductance(self.a) # validate type Nml2Quantity_conductance
value = find_attr_value_('thresh', node)
if value is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
self.thresh = value
self.validate_Nml2Quantity_voltage(self.thresh) # validate type Nml2Quantity_voltage
value = find_attr_value_('gL', node)
if value is not None and 'gL' not in already_processed:
already_processed.add('gL')
self.gL = value
self.validate_Nml2Quantity_conductance(self.gL) # validate type Nml2Quantity_conductance
value = find_attr_value_('tauw', node)
if value is not None and 'tauw' not in already_processed:
already_processed.add('tauw')
self.tauw = value
self.validate_Nml2Quantity_time(self.tauw) # validate type Nml2Quantity_time
super(AdExIaFCell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AdExIaFCell, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AdExIaFCell
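# Illustrative sketch, not part of the generateDS output: populating an
# AdExIaFCell from an XML element. The input text is assumed to be a single
# adExIaFCell element; buildAttributes() keeps each attribute as a string and
# runs it through the corresponding (currently no-op) validate_Nml2Quantity_*
# check.
def _example_adex_from_xml(xml_text):
    from xml.etree import ElementTree as etree
    node = etree.fromstring(xml_text)
    cell = AdExIaFCell.factory()
    cell.build(node)
    return cell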
class IzhikevichCell(BaseCell):
subclass = None
superclass = BaseCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, a=None, c=None, b=None, d=None, v0=None, thresh=None):
super(IzhikevichCell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, )
self.a = _cast(None, a)
self.c = _cast(None, c)
self.b = _cast(None, b)
self.d = _cast(None, d)
self.v0 = _cast(None, v0)
self.thresh = _cast(None, thresh)
pass
def factory(*args_, **kwargs_):
if IzhikevichCell.subclass:
return IzhikevichCell.subclass(*args_, **kwargs_)
else:
return IzhikevichCell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_a(self): return self.a
def set_a(self, a): self.a = a
def validate_Nml2Quantity_none(self, value):
# Validate type Nml2Quantity_none, a restriction on xs:string.
pass
def get_c(self): return self.c
def set_c(self, c): self.c = c
def get_b(self): return self.b
def set_b(self, b): self.b = b
def get_d(self): return self.d
def set_d(self, d): self.d = d
def get_v0(self): return self.v0
def set_v0(self, v0): self.v0 = v0
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def get_thresh(self): return self.thresh
def set_thresh(self, thresh): self.thresh = thresh
def hasContent_(self):
if (
super(IzhikevichCell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IzhikevichCell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IzhikevichCell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IzhikevichCell'):
super(IzhikevichCell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IzhikevichCell')
if self.a is not None and 'a' not in already_processed:
already_processed.add('a')
outfile.write(' a=%s' % (quote_attrib(self.a), ))
if self.c is not None and 'c' not in already_processed:
already_processed.add('c')
outfile.write(' c=%s' % (quote_attrib(self.c), ))
if self.b is not None and 'b' not in already_processed:
already_processed.add('b')
outfile.write(' b=%s' % (quote_attrib(self.b), ))
if self.d is not None and 'd' not in already_processed:
already_processed.add('d')
outfile.write(' d=%s' % (quote_attrib(self.d), ))
if self.v0 is not None and 'v0' not in already_processed:
already_processed.add('v0')
outfile.write(' v0=%s' % (quote_attrib(self.v0), ))
if self.thresh is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
outfile.write(' thresh=%s' % (quote_attrib(self.thresh), ))
def exportChildren(self, outfile, level, namespace_='', name_='IzhikevichCell', fromsubclass_=False, pretty_print=True):
super(IzhikevichCell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IzhikevichCell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.a is not None and 'a' not in already_processed:
already_processed.add('a')
showIndent(outfile, level)
outfile.write('a="%s",\n' % (self.a,))
if self.c is not None and 'c' not in already_processed:
already_processed.add('c')
showIndent(outfile, level)
outfile.write('c="%s",\n' % (self.c,))
if self.b is not None and 'b' not in already_processed:
already_processed.add('b')
showIndent(outfile, level)
outfile.write('b="%s",\n' % (self.b,))
if self.d is not None and 'd' not in already_processed:
already_processed.add('d')
showIndent(outfile, level)
outfile.write('d="%s",\n' % (self.d,))
if self.v0 is not None and 'v0' not in already_processed:
already_processed.add('v0')
showIndent(outfile, level)
outfile.write('v0="%s",\n' % (self.v0,))
if self.thresh is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
showIndent(outfile, level)
outfile.write('thresh="%s",\n' % (self.thresh,))
super(IzhikevichCell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IzhikevichCell, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('a', node)
if value is not None and 'a' not in already_processed:
already_processed.add('a')
self.a = value
self.validate_Nml2Quantity_none(self.a) # validate type Nml2Quantity_none
value = find_attr_value_('c', node)
if value is not None and 'c' not in already_processed:
already_processed.add('c')
self.c = value
self.validate_Nml2Quantity_none(self.c) # validate type Nml2Quantity_none
value = find_attr_value_('b', node)
if value is not None and 'b' not in already_processed:
already_processed.add('b')
self.b = value
self.validate_Nml2Quantity_none(self.b) # validate type Nml2Quantity_none
value = find_attr_value_('d', node)
if value is not None and 'd' not in already_processed:
already_processed.add('d')
self.d = value
self.validate_Nml2Quantity_none(self.d) # validate type Nml2Quantity_none
value = find_attr_value_('v0', node)
if value is not None and 'v0' not in already_processed:
already_processed.add('v0')
self.v0 = value
self.validate_Nml2Quantity_voltage(self.v0) # validate type Nml2Quantity_voltage
value = find_attr_value_('thresh', node)
if value is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
self.thresh = value
self.validate_Nml2Quantity_voltage(self.thresh) # validate type Nml2Quantity_voltage
super(IzhikevichCell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(IzhikevichCell, self).buildChildren(child_, node, nodeName_, True)
pass
# end class IzhikevichCell
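# Illustrative sketch, not part of the generateDS output: building an
# IzhikevichCell in code and serializing it. The parameter values are made-up
# examples; v0 and thresh are NeuroML quantity strings, while a, b, c and d
# are dimensionless strings, all checked only by the no-op validators above.
def _example_izhikevich_cell(outfile):
    cell = IzhikevichCell(id='izh0', v0='-70mV', thresh='30mV',
                          a='0.02', b='0.2', c='-65.0', d='6')
    cell.export(outfile, 0, name_='izhikevichCell')
    return cell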
class IaFCell(BaseCell):
subclass = None
superclass = BaseCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, reset=None, C=None, thresh=None, leakConductance=None, leakReversal=None, extensiontype_=None):
super(IaFCell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, extensiontype_, )
self.reset = _cast(None, reset)
self.C = _cast(None, C)
self.thresh = _cast(None, thresh)
self.leakConductance = _cast(None, leakConductance)
self.leakReversal = _cast(None, leakReversal)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if IaFCell.subclass:
return IaFCell.subclass(*args_, **kwargs_)
else:
return IaFCell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_reset(self): return self.reset
def set_reset(self, reset): self.reset = reset
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def get_C(self): return self.C
def set_C(self, C): self.C = C
def validate_Nml2Quantity_capacitance(self, value):
# Validate type Nml2Quantity_capacitance, a restriction on xs:string.
pass
def get_thresh(self): return self.thresh
def set_thresh(self, thresh): self.thresh = thresh
def get_leakConductance(self): return self.leakConductance
def set_leakConductance(self, leakConductance): self.leakConductance = leakConductance
def validate_Nml2Quantity_conductance(self, value):
# Validate type Nml2Quantity_conductance, a restriction on xs:string.
pass
def get_leakReversal(self): return self.leakReversal
def set_leakReversal(self, leakReversal): self.leakReversal = leakReversal
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(IaFCell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IaFCell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IaFCell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IaFCell'):
super(IaFCell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IaFCell')
if self.reset is not None and 'reset' not in already_processed:
already_processed.add('reset')
outfile.write(' reset=%s' % (quote_attrib(self.reset), ))
if self.C is not None and 'C' not in already_processed:
already_processed.add('C')
outfile.write(' C=%s' % (quote_attrib(self.C), ))
if self.thresh is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
outfile.write(' thresh=%s' % (quote_attrib(self.thresh), ))
if self.leakConductance is not None and 'leakConductance' not in already_processed:
already_processed.add('leakConductance')
outfile.write(' leakConductance=%s' % (quote_attrib(self.leakConductance), ))
if self.leakReversal is not None and 'leakReversal' not in already_processed:
already_processed.add('leakReversal')
outfile.write(' leakReversal=%s' % (quote_attrib(self.leakReversal), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='IaFCell', fromsubclass_=False, pretty_print=True):
super(IaFCell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IaFCell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.reset is not None and 'reset' not in already_processed:
already_processed.add('reset')
showIndent(outfile, level)
outfile.write('reset="%s",\n' % (self.reset,))
if self.C is not None and 'C' not in already_processed:
already_processed.add('C')
showIndent(outfile, level)
outfile.write('C="%s",\n' % (self.C,))
if self.thresh is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
showIndent(outfile, level)
outfile.write('thresh="%s",\n' % (self.thresh,))
if self.leakConductance is not None and 'leakConductance' not in already_processed:
already_processed.add('leakConductance')
showIndent(outfile, level)
outfile.write('leakConductance="%s",\n' % (self.leakConductance,))
if self.leakReversal is not None and 'leakReversal' not in already_processed:
already_processed.add('leakReversal')
showIndent(outfile, level)
outfile.write('leakReversal="%s",\n' % (self.leakReversal,))
super(IaFCell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IaFCell, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('reset', node)
if value is not None and 'reset' not in already_processed:
already_processed.add('reset')
self.reset = value
self.validate_Nml2Quantity_voltage(self.reset) # validate type Nml2Quantity_voltage
value = find_attr_value_('C', node)
if value is not None and 'C' not in already_processed:
already_processed.add('C')
self.C = value
self.validate_Nml2Quantity_capacitance(self.C) # validate type Nml2Quantity_capacitance
value = find_attr_value_('thresh', node)
if value is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
self.thresh = value
self.validate_Nml2Quantity_voltage(self.thresh) # validate type Nml2Quantity_voltage
value = find_attr_value_('leakConductance', node)
if value is not None and 'leakConductance' not in already_processed:
already_processed.add('leakConductance')
self.leakConductance = value
self.validate_Nml2Quantity_conductance(self.leakConductance) # validate type Nml2Quantity_conductance
value = find_attr_value_('leakReversal', node)
if value is not None and 'leakReversal' not in already_processed:
already_processed.add('leakReversal')
self.leakReversal = value
self.validate_Nml2Quantity_voltage(self.leakReversal) # validate type Nml2Quantity_voltage
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(IaFCell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(IaFCell, self).buildChildren(child_, node, nodeName_, True)
pass
# end class IaFCell
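# --- Illustrative usage sketch (not part of the generated bindings) ----------
# A minimal example of how the generated constructor and export() method of
# IaFCell above might be used. The id and quantity-string values are
# hypothetical, and this helper is never called by the module itself.
def _example_export_iaf_cell(outfile):
    cell = IaFCell(id='iaf0', C='1.0 nF', thresh='-50 mV', reset='-70 mV',
                   leakConductance='0.01 uS', leakReversal='-65 mV')
    # level=0 starts the element at the left margin; name_ sets the element
    # name written into the opening and closing tags.
    cell.export(outfile, 0, name_='iafCell', pretty_print=True)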
class IaFTauCell(BaseCell):
subclass = None
superclass = BaseCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, reset=None, tau=None, thresh=None, leakReversal=None, extensiontype_=None):
super(IaFTauCell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, extensiontype_, )
self.reset = _cast(None, reset)
self.tau = _cast(None, tau)
self.thresh = _cast(None, thresh)
self.leakReversal = _cast(None, leakReversal)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if IaFTauCell.subclass:
return IaFTauCell.subclass(*args_, **kwargs_)
else:
return IaFTauCell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_reset(self): return self.reset
def set_reset(self, reset): self.reset = reset
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def get_tau(self): return self.tau
def set_tau(self, tau): self.tau = tau
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_thresh(self): return self.thresh
def set_thresh(self, thresh): self.thresh = thresh
def get_leakReversal(self): return self.leakReversal
def set_leakReversal(self, leakReversal): self.leakReversal = leakReversal
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(IaFTauCell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IaFTauCell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IaFTauCell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IaFTauCell'):
super(IaFTauCell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IaFTauCell')
if self.reset is not None and 'reset' not in already_processed:
already_processed.add('reset')
outfile.write(' reset=%s' % (quote_attrib(self.reset), ))
if self.tau is not None and 'tau' not in already_processed:
already_processed.add('tau')
outfile.write(' tau=%s' % (quote_attrib(self.tau), ))
if self.thresh is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
outfile.write(' thresh=%s' % (quote_attrib(self.thresh), ))
if self.leakReversal is not None and 'leakReversal' not in already_processed:
already_processed.add('leakReversal')
outfile.write(' leakReversal=%s' % (quote_attrib(self.leakReversal), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='IaFTauCell', fromsubclass_=False, pretty_print=True):
super(IaFTauCell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IaFTauCell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.reset is not None and 'reset' not in already_processed:
already_processed.add('reset')
showIndent(outfile, level)
outfile.write('reset="%s",\n' % (self.reset,))
if self.tau is not None and 'tau' not in already_processed:
already_processed.add('tau')
showIndent(outfile, level)
outfile.write('tau="%s",\n' % (self.tau,))
if self.thresh is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
showIndent(outfile, level)
outfile.write('thresh="%s",\n' % (self.thresh,))
if self.leakReversal is not None and 'leakReversal' not in already_processed:
already_processed.add('leakReversal')
showIndent(outfile, level)
outfile.write('leakReversal="%s",\n' % (self.leakReversal,))
super(IaFTauCell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IaFTauCell, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('reset', node)
if value is not None and 'reset' not in already_processed:
already_processed.add('reset')
self.reset = value
self.validate_Nml2Quantity_voltage(self.reset) # validate type Nml2Quantity_voltage
value = find_attr_value_('tau', node)
if value is not None and 'tau' not in already_processed:
already_processed.add('tau')
self.tau = value
self.validate_Nml2Quantity_time(self.tau) # validate type Nml2Quantity_time
value = find_attr_value_('thresh', node)
if value is not None and 'thresh' not in already_processed:
already_processed.add('thresh')
self.thresh = value
self.validate_Nml2Quantity_voltage(self.thresh) # validate type Nml2Quantity_voltage
value = find_attr_value_('leakReversal', node)
if value is not None and 'leakReversal' not in already_processed:
already_processed.add('leakReversal')
self.leakReversal = value
self.validate_Nml2Quantity_voltage(self.leakReversal) # validate type Nml2Quantity_voltage
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(IaFTauCell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(IaFTauCell, self).buildChildren(child_, node, nodeName_, True)
pass
# end class IaFTauCell
class BaseConductanceBasedSynapse(BaseSynapse):
subclass = None
superclass = BaseSynapse
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, erev=None, gbase=None, extensiontype_=None):
super(BaseConductanceBasedSynapse, self).__init__(id, neuroLexId, name, metaid, notes, annotation, extensiontype_, )
self.erev = _cast(None, erev)
self.gbase = _cast(None, gbase)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if BaseConductanceBasedSynapse.subclass:
return BaseConductanceBasedSynapse.subclass(*args_, **kwargs_)
else:
return BaseConductanceBasedSynapse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_erev(self): return self.erev
def set_erev(self, erev): self.erev = erev
def validate_Nml2Quantity_voltage(self, value):
# Validate type Nml2Quantity_voltage, a restriction on xs:string.
pass
def get_gbase(self): return self.gbase
def set_gbase(self, gbase): self.gbase = gbase
def validate_Nml2Quantity_conductance(self, value):
# Validate type Nml2Quantity_conductance, a restriction on xs:string.
pass
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(BaseConductanceBasedSynapse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='BaseConductanceBasedSynapse', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='BaseConductanceBasedSynapse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BaseConductanceBasedSynapse'):
super(BaseConductanceBasedSynapse, self).exportAttributes(outfile, level, already_processed, namespace_, name_='BaseConductanceBasedSynapse')
if self.erev is not None and 'erev' not in already_processed:
already_processed.add('erev')
outfile.write(' erev=%s' % (quote_attrib(self.erev), ))
if self.gbase is not None and 'gbase' not in already_processed:
already_processed.add('gbase')
outfile.write(' gbase=%s' % (quote_attrib(self.gbase), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='BaseConductanceBasedSynapse', fromsubclass_=False, pretty_print=True):
super(BaseConductanceBasedSynapse, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='BaseConductanceBasedSynapse'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.erev is not None and 'erev' not in already_processed:
already_processed.add('erev')
showIndent(outfile, level)
outfile.write('erev="%s",\n' % (self.erev,))
if self.gbase is not None and 'gbase' not in already_processed:
already_processed.add('gbase')
showIndent(outfile, level)
outfile.write('gbase="%s",\n' % (self.gbase,))
super(BaseConductanceBasedSynapse, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(BaseConductanceBasedSynapse, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('erev', node)
if value is not None and 'erev' not in already_processed:
already_processed.add('erev')
self.erev = value
self.validate_Nml2Quantity_voltage(self.erev) # validate type Nml2Quantity_voltage
value = find_attr_value_('gbase', node)
if value is not None and 'gbase' not in already_processed:
already_processed.add('gbase')
self.gbase = value
self.validate_Nml2Quantity_conductance(self.gbase) # validate type Nml2Quantity_conductance
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(BaseConductanceBasedSynapse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(BaseConductanceBasedSynapse, self).buildChildren(child_, node, nodeName_, True)
pass
# end class BaseConductanceBasedSynapse
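# Note: BaseConductanceBasedSynapse is the shared base for the conductance-based
# synapse elements defined later in this module (ExpOneSynapse, ExpTwoSynapse).
# It contributes only the 'erev' and 'gbase' attributes, both carried as NeuroML2
# quantity strings, plus the xsi:type extension hook used by its subclasses.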
class AlphaCurrSynapse(BasePynnSynapse):
subclass = None
superclass = BasePynnSynapse
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn=None):
super(AlphaCurrSynapse, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn, )
pass
def factory(*args_, **kwargs_):
if AlphaCurrSynapse.subclass:
return AlphaCurrSynapse.subclass(*args_, **kwargs_)
else:
return AlphaCurrSynapse(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(AlphaCurrSynapse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='AlphaCurrSynapse', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AlphaCurrSynapse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AlphaCurrSynapse'):
super(AlphaCurrSynapse, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AlphaCurrSynapse')
def exportChildren(self, outfile, level, namespace_='', name_='AlphaCurrSynapse', fromsubclass_=False, pretty_print=True):
super(AlphaCurrSynapse, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='AlphaCurrSynapse'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(AlphaCurrSynapse, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AlphaCurrSynapse, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AlphaCurrSynapse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AlphaCurrSynapse, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AlphaCurrSynapse
class ExpCurrSynapse(BasePynnSynapse):
subclass = None
superclass = BasePynnSynapse
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn=None):
super(ExpCurrSynapse, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn, )
pass
def factory(*args_, **kwargs_):
if ExpCurrSynapse.subclass:
return ExpCurrSynapse.subclass(*args_, **kwargs_)
else:
return ExpCurrSynapse(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(ExpCurrSynapse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ExpCurrSynapse', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ExpCurrSynapse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExpCurrSynapse'):
super(ExpCurrSynapse, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ExpCurrSynapse')
def exportChildren(self, outfile, level, namespace_='', name_='ExpCurrSynapse', fromsubclass_=False, pretty_print=True):
super(ExpCurrSynapse, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ExpCurrSynapse'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ExpCurrSynapse, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ExpCurrSynapse, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ExpCurrSynapse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(ExpCurrSynapse, self).buildChildren(child_, node, nodeName_, True)
pass
# end class ExpCurrSynapse
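# Note: AlphaCurrSynapse and ExpCurrSynapse above declare no attributes of their
# own; they exist to bind distinct element names to the behaviour inherited from
# BasePynnSynapse (including the tau_syn parameter passed through __init__).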
class AlphaCondSynapse(BasePynnSynapse):
subclass = None
superclass = BasePynnSynapse
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn=None, e_rev=None):
super(AlphaCondSynapse, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn, )
self.e_rev = _cast(float, e_rev)
pass
def factory(*args_, **kwargs_):
if AlphaCondSynapse.subclass:
return AlphaCondSynapse.subclass(*args_, **kwargs_)
else:
return AlphaCondSynapse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_e_rev(self): return self.e_rev
def set_e_rev(self, e_rev): self.e_rev = e_rev
def hasContent_(self):
if (
super(AlphaCondSynapse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='AlphaCondSynapse', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AlphaCondSynapse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AlphaCondSynapse'):
super(AlphaCondSynapse, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AlphaCondSynapse')
if self.e_rev is not None and 'e_rev' not in already_processed:
already_processed.add('e_rev')
outfile.write(' e_rev="%s"' % self.gds_format_double(self.e_rev, input_name='e_rev'))
def exportChildren(self, outfile, level, namespace_='', name_='AlphaCondSynapse', fromsubclass_=False, pretty_print=True):
super(AlphaCondSynapse, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='AlphaCondSynapse'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.e_rev is not None and 'e_rev' not in already_processed:
already_processed.add('e_rev')
showIndent(outfile, level)
outfile.write('e_rev=%e,\n' % (self.e_rev,))
super(AlphaCondSynapse, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AlphaCondSynapse, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('e_rev', node)
if value is not None and 'e_rev' not in already_processed:
already_processed.add('e_rev')
try:
self.e_rev = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (e_rev): %s' % exp)
super(AlphaCondSynapse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AlphaCondSynapse, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AlphaCondSynapse
class ExpCondSynapse(BasePynnSynapse):
subclass = None
superclass = BasePynnSynapse
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn=None, e_rev=None):
super(ExpCondSynapse, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn, )
self.e_rev = _cast(float, e_rev)
pass
def factory(*args_, **kwargs_):
if ExpCondSynapse.subclass:
return ExpCondSynapse.subclass(*args_, **kwargs_)
else:
return ExpCondSynapse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_e_rev(self): return self.e_rev
def set_e_rev(self, e_rev): self.e_rev = e_rev
def hasContent_(self):
if (
super(ExpCondSynapse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ExpCondSynapse', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ExpCondSynapse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExpCondSynapse'):
super(ExpCondSynapse, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ExpCondSynapse')
if self.e_rev is not None and 'e_rev' not in already_processed:
already_processed.add('e_rev')
outfile.write(' e_rev="%s"' % self.gds_format_double(self.e_rev, input_name='e_rev'))
def exportChildren(self, outfile, level, namespace_='', name_='ExpCondSynapse', fromsubclass_=False, pretty_print=True):
super(ExpCondSynapse, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ExpCondSynapse'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.e_rev is not None and 'e_rev' not in already_processed:
already_processed.add('e_rev')
showIndent(outfile, level)
outfile.write('e_rev=%e,\n' % (self.e_rev,))
super(ExpCondSynapse, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ExpCondSynapse, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('e_rev', node)
if value is not None and 'e_rev' not in already_processed:
already_processed.add('e_rev')
try:
self.e_rev = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (e_rev): %s' % exp)
super(ExpCondSynapse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(ExpCondSynapse, self).buildChildren(child_, node, nodeName_, True)
pass
# end class ExpCondSynapse
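# --- Illustrative usage sketch (not part of the generated bindings) ----------
# Parsing an <expCondSynapse> element into the generated class via build() and
# reading the typed attribute back through its accessor. This assumes `element`
# is the kind of XML node the module's build machinery expects (e.g. an
# lxml.etree element); the helper is never called by the module itself.
#
#     elem = etree.fromstring('<expCondSynapse id="syn0" e_rev="0.0"/>')
#     _example_parse_exp_cond_synapse(elem)   # -> 0.0
def _example_parse_exp_cond_synapse(element):
    syn = ExpCondSynapse.factory()
    syn.build(element)          # buildAttributes() casts e_rev to float
    return syn.get_e_rev()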
class HH_cond_exp(basePyNNCell):
subclass = None
superclass = basePyNNCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn_I=None, tau_syn_E=None, i_offset=None, cm=None, v_init=None, gbar_K=None, e_rev_E=None, g_leak=None, e_rev_Na=None, e_rev_I=None, e_rev_K=None, e_rev_leak=None, v_offset=None, gbar_Na=None):
super(HH_cond_exp, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn_I, tau_syn_E, i_offset, cm, v_init, )
self.gbar_K = _cast(float, gbar_K)
self.e_rev_E = _cast(float, e_rev_E)
self.g_leak = _cast(float, g_leak)
self.e_rev_Na = _cast(float, e_rev_Na)
self.e_rev_I = _cast(float, e_rev_I)
self.e_rev_K = _cast(float, e_rev_K)
self.e_rev_leak = _cast(float, e_rev_leak)
self.v_offset = _cast(float, v_offset)
self.gbar_Na = _cast(float, gbar_Na)
pass
def factory(*args_, **kwargs_):
if HH_cond_exp.subclass:
return HH_cond_exp.subclass(*args_, **kwargs_)
else:
return HH_cond_exp(*args_, **kwargs_)
factory = staticmethod(factory)
def get_gbar_K(self): return self.gbar_K
def set_gbar_K(self, gbar_K): self.gbar_K = gbar_K
def get_e_rev_E(self): return self.e_rev_E
def set_e_rev_E(self, e_rev_E): self.e_rev_E = e_rev_E
def get_g_leak(self): return self.g_leak
def set_g_leak(self, g_leak): self.g_leak = g_leak
def get_e_rev_Na(self): return self.e_rev_Na
def set_e_rev_Na(self, e_rev_Na): self.e_rev_Na = e_rev_Na
def get_e_rev_I(self): return self.e_rev_I
def set_e_rev_I(self, e_rev_I): self.e_rev_I = e_rev_I
def get_e_rev_K(self): return self.e_rev_K
def set_e_rev_K(self, e_rev_K): self.e_rev_K = e_rev_K
def get_e_rev_leak(self): return self.e_rev_leak
def set_e_rev_leak(self, e_rev_leak): self.e_rev_leak = e_rev_leak
def get_v_offset(self): return self.v_offset
def set_v_offset(self, v_offset): self.v_offset = v_offset
def get_gbar_Na(self): return self.gbar_Na
def set_gbar_Na(self, gbar_Na): self.gbar_Na = gbar_Na
def hasContent_(self):
if (
super(HH_cond_exp, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='HH_cond_exp', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='HH_cond_exp')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='HH_cond_exp'):
super(HH_cond_exp, self).exportAttributes(outfile, level, already_processed, namespace_, name_='HH_cond_exp')
if self.gbar_K is not None and 'gbar_K' not in already_processed:
already_processed.add('gbar_K')
outfile.write(' gbar_K="%s"' % self.gds_format_double(self.gbar_K, input_name='gbar_K'))
if self.e_rev_E is not None and 'e_rev_E' not in already_processed:
already_processed.add('e_rev_E')
outfile.write(' e_rev_E="%s"' % self.gds_format_double(self.e_rev_E, input_name='e_rev_E'))
if self.g_leak is not None and 'g_leak' not in already_processed:
already_processed.add('g_leak')
outfile.write(' g_leak="%s"' % self.gds_format_double(self.g_leak, input_name='g_leak'))
if self.e_rev_Na is not None and 'e_rev_Na' not in already_processed:
already_processed.add('e_rev_Na')
outfile.write(' e_rev_Na="%s"' % self.gds_format_double(self.e_rev_Na, input_name='e_rev_Na'))
if self.e_rev_I is not None and 'e_rev_I' not in already_processed:
already_processed.add('e_rev_I')
outfile.write(' e_rev_I="%s"' % self.gds_format_double(self.e_rev_I, input_name='e_rev_I'))
if self.e_rev_K is not None and 'e_rev_K' not in already_processed:
already_processed.add('e_rev_K')
outfile.write(' e_rev_K="%s"' % self.gds_format_double(self.e_rev_K, input_name='e_rev_K'))
if self.e_rev_leak is not None and 'e_rev_leak' not in already_processed:
already_processed.add('e_rev_leak')
outfile.write(' e_rev_leak="%s"' % self.gds_format_double(self.e_rev_leak, input_name='e_rev_leak'))
if self.v_offset is not None and 'v_offset' not in already_processed:
already_processed.add('v_offset')
outfile.write(' v_offset="%s"' % self.gds_format_double(self.v_offset, input_name='v_offset'))
if self.gbar_Na is not None and 'gbar_Na' not in already_processed:
already_processed.add('gbar_Na')
outfile.write(' gbar_Na="%s"' % self.gds_format_double(self.gbar_Na, input_name='gbar_Na'))
def exportChildren(self, outfile, level, namespace_='', name_='HH_cond_exp', fromsubclass_=False, pretty_print=True):
super(HH_cond_exp, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='HH_cond_exp'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.gbar_K is not None and 'gbar_K' not in already_processed:
already_processed.add('gbar_K')
showIndent(outfile, level)
outfile.write('gbar_K=%e,\n' % (self.gbar_K,))
if self.e_rev_E is not None and 'e_rev_E' not in already_processed:
already_processed.add('e_rev_E')
showIndent(outfile, level)
outfile.write('e_rev_E=%e,\n' % (self.e_rev_E,))
if self.g_leak is not None and 'g_leak' not in already_processed:
already_processed.add('g_leak')
showIndent(outfile, level)
outfile.write('g_leak=%e,\n' % (self.g_leak,))
if self.e_rev_Na is not None and 'e_rev_Na' not in already_processed:
already_processed.add('e_rev_Na')
showIndent(outfile, level)
outfile.write('e_rev_Na=%e,\n' % (self.e_rev_Na,))
if self.e_rev_I is not None and 'e_rev_I' not in already_processed:
already_processed.add('e_rev_I')
showIndent(outfile, level)
outfile.write('e_rev_I=%e,\n' % (self.e_rev_I,))
if self.e_rev_K is not None and 'e_rev_K' not in already_processed:
already_processed.add('e_rev_K')
showIndent(outfile, level)
outfile.write('e_rev_K=%e,\n' % (self.e_rev_K,))
if self.e_rev_leak is not None and 'e_rev_leak' not in already_processed:
already_processed.add('e_rev_leak')
showIndent(outfile, level)
outfile.write('e_rev_leak=%e,\n' % (self.e_rev_leak,))
if self.v_offset is not None and 'v_offset' not in already_processed:
already_processed.add('v_offset')
showIndent(outfile, level)
outfile.write('v_offset=%e,\n' % (self.v_offset,))
if self.gbar_Na is not None and 'gbar_Na' not in already_processed:
already_processed.add('gbar_Na')
showIndent(outfile, level)
outfile.write('gbar_Na=%e,\n' % (self.gbar_Na,))
super(HH_cond_exp, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(HH_cond_exp, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('gbar_K', node)
if value is not None and 'gbar_K' not in already_processed:
already_processed.add('gbar_K')
try:
self.gbar_K = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (gbar_K): %s' % exp)
value = find_attr_value_('e_rev_E', node)
if value is not None and 'e_rev_E' not in already_processed:
already_processed.add('e_rev_E')
try:
self.e_rev_E = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (e_rev_E): %s' % exp)
value = find_attr_value_('g_leak', node)
if value is not None and 'g_leak' not in already_processed:
already_processed.add('g_leak')
try:
self.g_leak = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (g_leak): %s' % exp)
value = find_attr_value_('e_rev_Na', node)
if value is not None and 'e_rev_Na' not in already_processed:
already_processed.add('e_rev_Na')
try:
self.e_rev_Na = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (e_rev_Na): %s' % exp)
value = find_attr_value_('e_rev_I', node)
if value is not None and 'e_rev_I' not in already_processed:
already_processed.add('e_rev_I')
try:
self.e_rev_I = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (e_rev_I): %s' % exp)
value = find_attr_value_('e_rev_K', node)
if value is not None and 'e_rev_K' not in already_processed:
already_processed.add('e_rev_K')
try:
self.e_rev_K = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (e_rev_K): %s' % exp)
value = find_attr_value_('e_rev_leak', node)
if value is not None and 'e_rev_leak' not in already_processed:
already_processed.add('e_rev_leak')
try:
self.e_rev_leak = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (e_rev_leak): %s' % exp)
value = find_attr_value_('v_offset', node)
if value is not None and 'v_offset' not in already_processed:
already_processed.add('v_offset')
try:
self.v_offset = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (v_offset): %s' % exp)
value = find_attr_value_('gbar_Na', node)
if value is not None and 'gbar_Na' not in already_processed:
already_processed.add('gbar_Na')
try:
self.gbar_Na = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (gbar_Na): %s' % exp)
super(HH_cond_exp, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(HH_cond_exp, self).buildChildren(child_, node, nodeName_, True)
pass
# end class HH_cond_exp
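# --- Illustrative usage sketch (not part of the generated bindings) ----------
# Serialising an HH_cond_exp instance to a string with the generated export()
# method. The parameter values are hypothetical; StringIO (Python 2, matching
# the except-clause syntax used in this module) stands in for a real file.
def _example_hh_cond_exp_to_xml():
    from StringIO import StringIO
    buf = StringIO()
    cell = HH_cond_exp(id='hh0', gbar_Na=20.0, gbar_K=6.0, g_leak=0.01,
                       e_rev_Na=50.0, e_rev_K=-90.0, e_rev_leak=-65.0,
                       e_rev_E=0.0, e_rev_I=-80.0, v_offset=-63.0)
    cell.export(buf, 0, name_='HH_cond_exp', pretty_print=True)
    return buf.getvalue()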
class basePyNNIaFCell(basePyNNCell):
subclass = None
superclass = basePyNNCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn_I=None, tau_syn_E=None, i_offset=None, cm=None, v_init=None, tau_refrac=None, v_thresh=None, tau_m=None, v_reset=None, v_rest=None, extensiontype_=None):
super(basePyNNIaFCell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn_I, tau_syn_E, i_offset, cm, v_init, extensiontype_, )
self.tau_refrac = _cast(float, tau_refrac)
self.v_thresh = _cast(float, v_thresh)
self.tau_m = _cast(float, tau_m)
self.v_reset = _cast(float, v_reset)
self.v_rest = _cast(float, v_rest)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if basePyNNIaFCell.subclass:
return basePyNNIaFCell.subclass(*args_, **kwargs_)
else:
return basePyNNIaFCell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tau_refrac(self): return self.tau_refrac
def set_tau_refrac(self, tau_refrac): self.tau_refrac = tau_refrac
def get_v_thresh(self): return self.v_thresh
def set_v_thresh(self, v_thresh): self.v_thresh = v_thresh
def get_tau_m(self): return self.tau_m
def set_tau_m(self, tau_m): self.tau_m = tau_m
def get_v_reset(self): return self.v_reset
def set_v_reset(self, v_reset): self.v_reset = v_reset
def get_v_rest(self): return self.v_rest
def set_v_rest(self, v_rest): self.v_rest = v_rest
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(basePyNNIaFCell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='basePyNNIaFCell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='basePyNNIaFCell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='basePyNNIaFCell'):
super(basePyNNIaFCell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='basePyNNIaFCell')
if self.tau_refrac is not None and 'tau_refrac' not in already_processed:
already_processed.add('tau_refrac')
outfile.write(' tau_refrac="%s"' % self.gds_format_double(self.tau_refrac, input_name='tau_refrac'))
if self.v_thresh is not None and 'v_thresh' not in already_processed:
already_processed.add('v_thresh')
outfile.write(' v_thresh="%s"' % self.gds_format_double(self.v_thresh, input_name='v_thresh'))
if self.tau_m is not None and 'tau_m' not in already_processed:
already_processed.add('tau_m')
outfile.write(' tau_m="%s"' % self.gds_format_double(self.tau_m, input_name='tau_m'))
if self.v_reset is not None and 'v_reset' not in already_processed:
already_processed.add('v_reset')
outfile.write(' v_reset="%s"' % self.gds_format_double(self.v_reset, input_name='v_reset'))
if self.v_rest is not None and 'v_rest' not in already_processed:
already_processed.add('v_rest')
outfile.write(' v_rest="%s"' % self.gds_format_double(self.v_rest, input_name='v_rest'))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='basePyNNIaFCell', fromsubclass_=False, pretty_print=True):
super(basePyNNIaFCell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='basePyNNIaFCell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tau_refrac is not None and 'tau_refrac' not in already_processed:
already_processed.add('tau_refrac')
showIndent(outfile, level)
outfile.write('tau_refrac=%e,\n' % (self.tau_refrac,))
if self.v_thresh is not None and 'v_thresh' not in already_processed:
already_processed.add('v_thresh')
showIndent(outfile, level)
outfile.write('v_thresh=%e,\n' % (self.v_thresh,))
if self.tau_m is not None and 'tau_m' not in already_processed:
already_processed.add('tau_m')
showIndent(outfile, level)
outfile.write('tau_m=%e,\n' % (self.tau_m,))
if self.v_reset is not None and 'v_reset' not in already_processed:
already_processed.add('v_reset')
showIndent(outfile, level)
outfile.write('v_reset=%e,\n' % (self.v_reset,))
if self.v_rest is not None and 'v_rest' not in already_processed:
already_processed.add('v_rest')
showIndent(outfile, level)
outfile.write('v_rest=%e,\n' % (self.v_rest,))
super(basePyNNIaFCell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(basePyNNIaFCell, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tau_refrac', node)
if value is not None and 'tau_refrac' not in already_processed:
already_processed.add('tau_refrac')
try:
self.tau_refrac = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (tau_refrac): %s' % exp)
value = find_attr_value_('v_thresh', node)
if value is not None and 'v_thresh' not in already_processed:
already_processed.add('v_thresh')
try:
self.v_thresh = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (v_thresh): %s' % exp)
value = find_attr_value_('tau_m', node)
if value is not None and 'tau_m' not in already_processed:
already_processed.add('tau_m')
try:
self.tau_m = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (tau_m): %s' % exp)
value = find_attr_value_('v_reset', node)
if value is not None and 'v_reset' not in already_processed:
already_processed.add('v_reset')
try:
self.v_reset = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (v_reset): %s' % exp)
value = find_attr_value_('v_rest', node)
if value is not None and 'v_rest' not in already_processed:
already_processed.add('v_rest')
try:
self.v_rest = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (v_rest): %s' % exp)
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(basePyNNIaFCell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(basePyNNIaFCell, self).buildChildren(child_, node, nodeName_, True)
pass
# end class basePyNNIaFCell
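# Note on extension handling: classes that accept an 'extensiontype_' argument
# (IaFCell, IaFTauCell, basePyNNIaFCell, ExpTwoSynapse, ...) emit an xsi:type
# attribute together with the XMLSchema-instance namespace declaration when
# exported, and read 'xsi:type' back in buildAttributes(), so derived schema
# types can be round-tripped through the common export/build machinery.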
class IaFRefCell(IaFCell):
subclass = None
superclass = IaFCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, reset=None, C=None, thresh=None, leakConductance=None, leakReversal=None, refract=None):
super(IaFRefCell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, reset, C, thresh, leakConductance, leakReversal, )
self.refract = _cast(None, refract)
pass
def factory(*args_, **kwargs_):
if IaFRefCell.subclass:
return IaFRefCell.subclass(*args_, **kwargs_)
else:
return IaFRefCell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_refract(self): return self.refract
def set_refract(self, refract): self.refract = refract
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(IaFRefCell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IaFRefCell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IaFRefCell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IaFRefCell'):
super(IaFRefCell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IaFRefCell')
if self.refract is not None and 'refract' not in already_processed:
already_processed.add('refract')
outfile.write(' refract=%s' % (quote_attrib(self.refract), ))
def exportChildren(self, outfile, level, namespace_='', name_='IaFRefCell', fromsubclass_=False, pretty_print=True):
super(IaFRefCell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IaFRefCell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.refract is not None and 'refract' not in already_processed:
already_processed.add('refract')
showIndent(outfile, level)
outfile.write('refract="%s",\n' % (self.refract,))
super(IaFRefCell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IaFRefCell, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('refract', node)
if value is not None and 'refract' not in already_processed:
already_processed.add('refract')
self.refract = value
self.validate_Nml2Quantity_time(self.refract) # validate type Nml2Quantity_time
super(IaFRefCell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(IaFRefCell, self).buildChildren(child_, node, nodeName_, True)
pass
# end class IaFRefCell
class IaFTauRefCell(IaFTauCell):
subclass = None
superclass = IaFTauCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, reset=None, tau=None, thresh=None, leakReversal=None, refract=None):
super(IaFTauRefCell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, reset, tau, thresh, leakReversal, )
self.refract = _cast(None, refract)
pass
def factory(*args_, **kwargs_):
if IaFTauRefCell.subclass:
return IaFTauRefCell.subclass(*args_, **kwargs_)
else:
return IaFTauRefCell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_refract(self): return self.refract
def set_refract(self, refract): self.refract = refract
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(IaFTauRefCell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IaFTauRefCell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IaFTauRefCell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IaFTauRefCell'):
super(IaFTauRefCell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IaFTauRefCell')
if self.refract is not None and 'refract' not in already_processed:
already_processed.add('refract')
outfile.write(' refract=%s' % (quote_attrib(self.refract), ))
def exportChildren(self, outfile, level, namespace_='', name_='IaFTauRefCell', fromsubclass_=False, pretty_print=True):
super(IaFTauRefCell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IaFTauRefCell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.refract is not None and 'refract' not in already_processed:
already_processed.add('refract')
showIndent(outfile, level)
outfile.write('refract="%s",\n' % (self.refract,))
super(IaFTauRefCell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IaFTauRefCell, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('refract', node)
if value is not None and 'refract' not in already_processed:
already_processed.add('refract')
self.refract = value
self.validate_Nml2Quantity_time(self.refract) # validate type Nml2Quantity_time
super(IaFTauRefCell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(IaFTauRefCell, self).buildChildren(child_, node, nodeName_, True)
pass
# end class IaFTauRefCell
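# Note: IaFRefCell and IaFTauRefCell extend IaFCell and IaFTauCell respectively
# with a single 'refract' attribute (an Nml2Quantity_time string). As elsewhere
# in this module, the generated validate_* methods are no-op stubs, so malformed
# quantity strings are accepted silently at build time.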
class ExpTwoSynapse(BaseConductanceBasedSynapse):
subclass = None
superclass = BaseConductanceBasedSynapse
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, erev=None, gbase=None, tauDecay=None, tauRise=None, extensiontype_=None):
super(ExpTwoSynapse, self).__init__(id, neuroLexId, name, metaid, notes, annotation, erev, gbase, extensiontype_, )
self.tauDecay = _cast(None, tauDecay)
self.tauRise = _cast(None, tauRise)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if ExpTwoSynapse.subclass:
return ExpTwoSynapse.subclass(*args_, **kwargs_)
else:
return ExpTwoSynapse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tauDecay(self): return self.tauDecay
def set_tauDecay(self, tauDecay): self.tauDecay = tauDecay
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def get_tauRise(self): return self.tauRise
def set_tauRise(self, tauRise): self.tauRise = tauRise
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(ExpTwoSynapse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ExpTwoSynapse', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ExpTwoSynapse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExpTwoSynapse'):
super(ExpTwoSynapse, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ExpTwoSynapse')
if self.tauDecay is not None and 'tauDecay' not in already_processed:
already_processed.add('tauDecay')
outfile.write(' tauDecay=%s' % (quote_attrib(self.tauDecay), ))
if self.tauRise is not None and 'tauRise' not in already_processed:
already_processed.add('tauRise')
outfile.write(' tauRise=%s' % (quote_attrib(self.tauRise), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='ExpTwoSynapse', fromsubclass_=False, pretty_print=True):
super(ExpTwoSynapse, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ExpTwoSynapse'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tauDecay is not None and 'tauDecay' not in already_processed:
already_processed.add('tauDecay')
showIndent(outfile, level)
outfile.write('tauDecay="%s",\n' % (self.tauDecay,))
if self.tauRise is not None and 'tauRise' not in already_processed:
already_processed.add('tauRise')
showIndent(outfile, level)
outfile.write('tauRise="%s",\n' % (self.tauRise,))
super(ExpTwoSynapse, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ExpTwoSynapse, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tauDecay', node)
if value is not None and 'tauDecay' not in already_processed:
already_processed.add('tauDecay')
self.tauDecay = value
self.validate_Nml2Quantity_time(self.tauDecay) # validate type Nml2Quantity_time
value = find_attr_value_('tauRise', node)
if value is not None and 'tauRise' not in already_processed:
already_processed.add('tauRise')
self.tauRise = value
self.validate_Nml2Quantity_time(self.tauRise) # validate type Nml2Quantity_time
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(ExpTwoSynapse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(ExpTwoSynapse, self).buildChildren(child_, node, nodeName_, True)
pass
# end class ExpTwoSynapse
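# The classes that follow repeat the same pattern as ExpTwoSynapse: each one
# mirrors a single NeuroML element type and only adds its own attributes on
# top of the export/build machinery inherited from its superclass.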
class ExpOneSynapse(BaseConductanceBasedSynapse):
subclass = None
superclass = BaseConductanceBasedSynapse
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, erev=None, gbase=None, tauDecay=None):
super(ExpOneSynapse, self).__init__(id, neuroLexId, name, metaid, notes, annotation, erev, gbase, )
self.tauDecay = _cast(None, tauDecay)
pass
def factory(*args_, **kwargs_):
if ExpOneSynapse.subclass:
return ExpOneSynapse.subclass(*args_, **kwargs_)
else:
return ExpOneSynapse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tauDecay(self): return self.tauDecay
def set_tauDecay(self, tauDecay): self.tauDecay = tauDecay
def validate_Nml2Quantity_time(self, value):
# Validate type Nml2Quantity_time, a restriction on xs:string.
pass
def hasContent_(self):
if (
super(ExpOneSynapse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ExpOneSynapse', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ExpOneSynapse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExpOneSynapse'):
super(ExpOneSynapse, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ExpOneSynapse')
if self.tauDecay is not None and 'tauDecay' not in already_processed:
already_processed.add('tauDecay')
outfile.write(' tauDecay=%s' % (quote_attrib(self.tauDecay), ))
def exportChildren(self, outfile, level, namespace_='', name_='ExpOneSynapse', fromsubclass_=False, pretty_print=True):
super(ExpOneSynapse, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ExpOneSynapse'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tauDecay is not None and 'tauDecay' not in already_processed:
already_processed.add('tauDecay')
showIndent(outfile, level)
outfile.write('tauDecay="%s",\n' % (self.tauDecay,))
super(ExpOneSynapse, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ExpOneSynapse, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tauDecay', node)
if value is not None and 'tauDecay' not in already_processed:
already_processed.add('tauDecay')
self.tauDecay = value
self.validate_Nml2Quantity_time(self.tauDecay) # validate type Nml2Quantity_time
super(ExpOneSynapse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(ExpOneSynapse, self).buildChildren(child_, node, nodeName_, True)
pass
# end class ExpOneSynapse
class IF_curr_exp(basePyNNIaFCell):
subclass = None
superclass = basePyNNIaFCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn_I=None, tau_syn_E=None, i_offset=None, cm=None, v_init=None, tau_refrac=None, v_thresh=None, tau_m=None, v_reset=None, v_rest=None):
super(IF_curr_exp, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn_I, tau_syn_E, i_offset, cm, v_init, tau_refrac, v_thresh, tau_m, v_reset, v_rest, )
pass
def factory(*args_, **kwargs_):
if IF_curr_exp.subclass:
return IF_curr_exp.subclass(*args_, **kwargs_)
else:
return IF_curr_exp(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(IF_curr_exp, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IF_curr_exp', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IF_curr_exp')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IF_curr_exp'):
super(IF_curr_exp, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IF_curr_exp')
def exportChildren(self, outfile, level, namespace_='', name_='IF_curr_exp', fromsubclass_=False, pretty_print=True):
super(IF_curr_exp, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IF_curr_exp'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(IF_curr_exp, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IF_curr_exp, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(IF_curr_exp, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(IF_curr_exp, self).buildChildren(child_, node, nodeName_, True)
pass
# end class IF_curr_exp
class IF_curr_alpha(basePyNNIaFCell):
subclass = None
superclass = basePyNNIaFCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn_I=None, tau_syn_E=None, i_offset=None, cm=None, v_init=None, tau_refrac=None, v_thresh=None, tau_m=None, v_reset=None, v_rest=None):
super(IF_curr_alpha, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn_I, tau_syn_E, i_offset, cm, v_init, tau_refrac, v_thresh, tau_m, v_reset, v_rest, )
pass
def factory(*args_, **kwargs_):
if IF_curr_alpha.subclass:
return IF_curr_alpha.subclass(*args_, **kwargs_)
else:
return IF_curr_alpha(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(IF_curr_alpha, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IF_curr_alpha', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IF_curr_alpha')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IF_curr_alpha'):
super(IF_curr_alpha, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IF_curr_alpha')
def exportChildren(self, outfile, level, namespace_='', name_='IF_curr_alpha', fromsubclass_=False, pretty_print=True):
super(IF_curr_alpha, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IF_curr_alpha'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(IF_curr_alpha, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IF_curr_alpha, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(IF_curr_alpha, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(IF_curr_alpha, self).buildChildren(child_, node, nodeName_, True)
pass
# end class IF_curr_alpha
class basePyNNIaFCondCell(basePyNNIaFCell):
subclass = None
superclass = basePyNNIaFCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn_I=None, tau_syn_E=None, i_offset=None, cm=None, v_init=None, tau_refrac=None, v_thresh=None, tau_m=None, v_reset=None, v_rest=None, e_rev_I=None, e_rev_E=None, extensiontype_=None):
super(basePyNNIaFCondCell, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn_I, tau_syn_E, i_offset, cm, v_init, tau_refrac, v_thresh, tau_m, v_reset, v_rest, extensiontype_, )
self.e_rev_I = _cast(float, e_rev_I)
self.e_rev_E = _cast(float, e_rev_E)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if basePyNNIaFCondCell.subclass:
return basePyNNIaFCondCell.subclass(*args_, **kwargs_)
else:
return basePyNNIaFCondCell(*args_, **kwargs_)
factory = staticmethod(factory)
def get_e_rev_I(self): return self.e_rev_I
def set_e_rev_I(self, e_rev_I): self.e_rev_I = e_rev_I
def get_e_rev_E(self): return self.e_rev_E
def set_e_rev_E(self, e_rev_E): self.e_rev_E = e_rev_E
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(basePyNNIaFCondCell, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='basePyNNIaFCondCell', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='basePyNNIaFCondCell')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='basePyNNIaFCondCell'):
super(basePyNNIaFCondCell, self).exportAttributes(outfile, level, already_processed, namespace_, name_='basePyNNIaFCondCell')
if self.e_rev_I is not None and 'e_rev_I' not in already_processed:
already_processed.add('e_rev_I')
outfile.write(' e_rev_I="%s"' % self.gds_format_double(self.e_rev_I, input_name='e_rev_I'))
if self.e_rev_E is not None and 'e_rev_E' not in already_processed:
already_processed.add('e_rev_E')
outfile.write(' e_rev_E="%s"' % self.gds_format_double(self.e_rev_E, input_name='e_rev_E'))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='basePyNNIaFCondCell', fromsubclass_=False, pretty_print=True):
super(basePyNNIaFCondCell, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='basePyNNIaFCondCell'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.e_rev_I is not None and 'e_rev_I' not in already_processed:
already_processed.add('e_rev_I')
showIndent(outfile, level)
outfile.write('e_rev_I=%e,\n' % (self.e_rev_I,))
if self.e_rev_E is not None and 'e_rev_E' not in already_processed:
already_processed.add('e_rev_E')
showIndent(outfile, level)
outfile.write('e_rev_E=%e,\n' % (self.e_rev_E,))
super(basePyNNIaFCondCell, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(basePyNNIaFCondCell, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('e_rev_I', node)
if value is not None and 'e_rev_I' not in already_processed:
already_processed.add('e_rev_I')
try:
self.e_rev_I = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (e_rev_I): %s' % exp)
value = find_attr_value_('e_rev_E', node)
if value is not None and 'e_rev_E' not in already_processed:
already_processed.add('e_rev_E')
try:
self.e_rev_E = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (e_rev_E): %s' % exp)
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(basePyNNIaFCondCell, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(basePyNNIaFCondCell, self).buildChildren(child_, node, nodeName_, True)
pass
# end class basePyNNIaFCondCell
class BlockingPlasticSynapse(ExpTwoSynapse):
subclass = None
superclass = ExpTwoSynapse
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, erev=None, gbase=None, tauDecay=None, tauRise=None, plasticityMechanism=None, blockMechanism=None):
super(BlockingPlasticSynapse, self).__init__(id, neuroLexId, name, metaid, notes, annotation, erev, gbase, tauDecay, tauRise, )
self.plasticityMechanism = plasticityMechanism
self.blockMechanism = blockMechanism
def factory(*args_, **kwargs_):
if BlockingPlasticSynapse.subclass:
return BlockingPlasticSynapse.subclass(*args_, **kwargs_)
else:
return BlockingPlasticSynapse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_plasticityMechanism(self): return self.plasticityMechanism
def set_plasticityMechanism(self, plasticityMechanism): self.plasticityMechanism = plasticityMechanism
def get_blockMechanism(self): return self.blockMechanism
def set_blockMechanism(self, blockMechanism): self.blockMechanism = blockMechanism
def hasContent_(self):
if (
self.plasticityMechanism is not None or
self.blockMechanism is not None or
super(BlockingPlasticSynapse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='BlockingPlasticSynapse', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='BlockingPlasticSynapse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BlockingPlasticSynapse'):
super(BlockingPlasticSynapse, self).exportAttributes(outfile, level, already_processed, namespace_, name_='BlockingPlasticSynapse')
def exportChildren(self, outfile, level, namespace_='', name_='BlockingPlasticSynapse', fromsubclass_=False, pretty_print=True):
super(BlockingPlasticSynapse, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.plasticityMechanism is not None:
self.plasticityMechanism.export(outfile, level, namespace_, name_='plasticityMechanism', pretty_print=pretty_print)
if self.blockMechanism is not None:
self.blockMechanism.export(outfile, level, namespace_, name_='blockMechanism', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='BlockingPlasticSynapse'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(BlockingPlasticSynapse, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(BlockingPlasticSynapse, self).exportLiteralChildren(outfile, level, name_)
if self.plasticityMechanism is not None:
showIndent(outfile, level)
outfile.write('plasticityMechanism=model_.PlasticityMechanism(\n')
self.plasticityMechanism.exportLiteral(outfile, level, name_='plasticityMechanism')
showIndent(outfile, level)
outfile.write('),\n')
if self.blockMechanism is not None:
showIndent(outfile, level)
outfile.write('blockMechanism=model_.BlockMechanism(\n')
self.blockMechanism.exportLiteral(outfile, level, name_='blockMechanism')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(BlockingPlasticSynapse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'plasticityMechanism':
obj_ = PlasticityMechanism.factory()
obj_.build(child_)
self.set_plasticityMechanism(obj_)
elif nodeName_ == 'blockMechanism':
obj_ = BlockMechanism.factory()
obj_.build(child_)
self.set_blockMechanism(obj_)
super(BlockingPlasticSynapse, self).buildChildren(child_, node, nodeName_, True)
# end class BlockingPlasticSynapse
class EIF_cond_alpha_isfa_ista(basePyNNIaFCondCell):
subclass = None
superclass = basePyNNIaFCondCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn_I=None, tau_syn_E=None, i_offset=None, cm=None, v_init=None, tau_refrac=None, v_thresh=None, tau_m=None, v_reset=None, v_rest=None, e_rev_I=None, e_rev_E=None, a=None, delta_T=None, b=None, v_spike=None, tau_w=None):
super(EIF_cond_alpha_isfa_ista, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn_I, tau_syn_E, i_offset, cm, v_init, tau_refrac, v_thresh, tau_m, v_reset, v_rest, e_rev_I, e_rev_E, )
self.a = _cast(float, a)
self.delta_T = _cast(float, delta_T)
self.b = _cast(float, b)
self.v_spike = _cast(float, v_spike)
self.tau_w = _cast(float, tau_w)
pass
def factory(*args_, **kwargs_):
if EIF_cond_alpha_isfa_ista.subclass:
return EIF_cond_alpha_isfa_ista.subclass(*args_, **kwargs_)
else:
return EIF_cond_alpha_isfa_ista(*args_, **kwargs_)
factory = staticmethod(factory)
def get_a(self): return self.a
def set_a(self, a): self.a = a
def get_delta_T(self): return self.delta_T
def set_delta_T(self, delta_T): self.delta_T = delta_T
def get_b(self): return self.b
def set_b(self, b): self.b = b
def get_v_spike(self): return self.v_spike
def set_v_spike(self, v_spike): self.v_spike = v_spike
def get_tau_w(self): return self.tau_w
def set_tau_w(self, tau_w): self.tau_w = tau_w
def hasContent_(self):
if (
super(EIF_cond_alpha_isfa_ista, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='EIF_cond_alpha_isfa_ista', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='EIF_cond_alpha_isfa_ista')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EIF_cond_alpha_isfa_ista'):
super(EIF_cond_alpha_isfa_ista, self).exportAttributes(outfile, level, already_processed, namespace_, name_='EIF_cond_alpha_isfa_ista')
if self.a is not None and 'a' not in already_processed:
already_processed.add('a')
outfile.write(' a="%s"' % self.gds_format_double(self.a, input_name='a'))
if self.delta_T is not None and 'delta_T' not in already_processed:
already_processed.add('delta_T')
outfile.write(' delta_T="%s"' % self.gds_format_double(self.delta_T, input_name='delta_T'))
if self.b is not None and 'b' not in already_processed:
already_processed.add('b')
outfile.write(' b="%s"' % self.gds_format_double(self.b, input_name='b'))
if self.v_spike is not None and 'v_spike' not in already_processed:
already_processed.add('v_spike')
outfile.write(' v_spike="%s"' % self.gds_format_double(self.v_spike, input_name='v_spike'))
if self.tau_w is not None and 'tau_w' not in already_processed:
already_processed.add('tau_w')
outfile.write(' tau_w="%s"' % self.gds_format_double(self.tau_w, input_name='tau_w'))
def exportChildren(self, outfile, level, namespace_='', name_='EIF_cond_alpha_isfa_ista', fromsubclass_=False, pretty_print=True):
super(EIF_cond_alpha_isfa_ista, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='EIF_cond_alpha_isfa_ista'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.a is not None and 'a' not in already_processed:
already_processed.add('a')
showIndent(outfile, level)
outfile.write('a=%e,\n' % (self.a,))
if self.delta_T is not None and 'delta_T' not in already_processed:
already_processed.add('delta_T')
showIndent(outfile, level)
outfile.write('delta_T=%e,\n' % (self.delta_T,))
if self.b is not None and 'b' not in already_processed:
already_processed.add('b')
showIndent(outfile, level)
outfile.write('b=%e,\n' % (self.b,))
if self.v_spike is not None and 'v_spike' not in already_processed:
already_processed.add('v_spike')
showIndent(outfile, level)
outfile.write('v_spike=%e,\n' % (self.v_spike,))
if self.tau_w is not None and 'tau_w' not in already_processed:
already_processed.add('tau_w')
showIndent(outfile, level)
outfile.write('tau_w=%e,\n' % (self.tau_w,))
super(EIF_cond_alpha_isfa_ista, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(EIF_cond_alpha_isfa_ista, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('a', node)
if value is not None and 'a' not in already_processed:
already_processed.add('a')
try:
self.a = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (a): %s' % exp)
value = find_attr_value_('delta_T', node)
if value is not None and 'delta_T' not in already_processed:
already_processed.add('delta_T')
try:
self.delta_T = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (delta_T): %s' % exp)
value = find_attr_value_('b', node)
if value is not None and 'b' not in already_processed:
already_processed.add('b')
try:
self.b = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (b): %s' % exp)
value = find_attr_value_('v_spike', node)
if value is not None and 'v_spike' not in already_processed:
already_processed.add('v_spike')
try:
self.v_spike = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (v_spike): %s' % exp)
value = find_attr_value_('tau_w', node)
if value is not None and 'tau_w' not in already_processed:
already_processed.add('tau_w')
try:
self.tau_w = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (tau_w): %s' % exp)
super(EIF_cond_alpha_isfa_ista, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(EIF_cond_alpha_isfa_ista, self).buildChildren(child_, node, nodeName_, True)
pass
# end class EIF_cond_alpha_isfa_ista
class EIF_cond_exp_isfa_ista(basePyNNIaFCondCell):
subclass = None
superclass = basePyNNIaFCondCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn_I=None, tau_syn_E=None, i_offset=None, cm=None, v_init=None, tau_refrac=None, v_thresh=None, tau_m=None, v_reset=None, v_rest=None, e_rev_I=None, e_rev_E=None, a=None, delta_T=None, b=None, v_spike=None, tau_w=None):
super(EIF_cond_exp_isfa_ista, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn_I, tau_syn_E, i_offset, cm, v_init, tau_refrac, v_thresh, tau_m, v_reset, v_rest, e_rev_I, e_rev_E, )
self.a = _cast(float, a)
self.delta_T = _cast(float, delta_T)
self.b = _cast(float, b)
self.v_spike = _cast(float, v_spike)
self.tau_w = _cast(float, tau_w)
pass
def factory(*args_, **kwargs_):
if EIF_cond_exp_isfa_ista.subclass:
return EIF_cond_exp_isfa_ista.subclass(*args_, **kwargs_)
else:
return EIF_cond_exp_isfa_ista(*args_, **kwargs_)
factory = staticmethod(factory)
def get_a(self): return self.a
def set_a(self, a): self.a = a
def get_delta_T(self): return self.delta_T
def set_delta_T(self, delta_T): self.delta_T = delta_T
def get_b(self): return self.b
def set_b(self, b): self.b = b
def get_v_spike(self): return self.v_spike
def set_v_spike(self, v_spike): self.v_spike = v_spike
def get_tau_w(self): return self.tau_w
def set_tau_w(self, tau_w): self.tau_w = tau_w
def hasContent_(self):
if (
super(EIF_cond_exp_isfa_ista, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='EIF_cond_exp_isfa_ista', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='EIF_cond_exp_isfa_ista')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EIF_cond_exp_isfa_ista'):
super(EIF_cond_exp_isfa_ista, self).exportAttributes(outfile, level, already_processed, namespace_, name_='EIF_cond_exp_isfa_ista')
if self.a is not None and 'a' not in already_processed:
already_processed.add('a')
outfile.write(' a="%s"' % self.gds_format_double(self.a, input_name='a'))
if self.delta_T is not None and 'delta_T' not in already_processed:
already_processed.add('delta_T')
outfile.write(' delta_T="%s"' % self.gds_format_double(self.delta_T, input_name='delta_T'))
if self.b is not None and 'b' not in already_processed:
already_processed.add('b')
outfile.write(' b="%s"' % self.gds_format_double(self.b, input_name='b'))
if self.v_spike is not None and 'v_spike' not in already_processed:
already_processed.add('v_spike')
outfile.write(' v_spike="%s"' % self.gds_format_double(self.v_spike, input_name='v_spike'))
if self.tau_w is not None and 'tau_w' not in already_processed:
already_processed.add('tau_w')
outfile.write(' tau_w="%s"' % self.gds_format_double(self.tau_w, input_name='tau_w'))
def exportChildren(self, outfile, level, namespace_='', name_='EIF_cond_exp_isfa_ista', fromsubclass_=False, pretty_print=True):
super(EIF_cond_exp_isfa_ista, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='EIF_cond_exp_isfa_ista'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.a is not None and 'a' not in already_processed:
already_processed.add('a')
showIndent(outfile, level)
outfile.write('a=%e,\n' % (self.a,))
if self.delta_T is not None and 'delta_T' not in already_processed:
already_processed.add('delta_T')
showIndent(outfile, level)
outfile.write('delta_T=%e,\n' % (self.delta_T,))
if self.b is not None and 'b' not in already_processed:
already_processed.add('b')
showIndent(outfile, level)
outfile.write('b=%e,\n' % (self.b,))
if self.v_spike is not None and 'v_spike' not in already_processed:
already_processed.add('v_spike')
showIndent(outfile, level)
outfile.write('v_spike=%e,\n' % (self.v_spike,))
if self.tau_w is not None and 'tau_w' not in already_processed:
already_processed.add('tau_w')
showIndent(outfile, level)
outfile.write('tau_w=%e,\n' % (self.tau_w,))
super(EIF_cond_exp_isfa_ista, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(EIF_cond_exp_isfa_ista, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('a', node)
if value is not None and 'a' not in already_processed:
already_processed.add('a')
try:
self.a = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (a): %s' % exp)
value = find_attr_value_('delta_T', node)
if value is not None and 'delta_T' not in already_processed:
already_processed.add('delta_T')
try:
self.delta_T = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (delta_T): %s' % exp)
value = find_attr_value_('b', node)
if value is not None and 'b' not in already_processed:
already_processed.add('b')
try:
self.b = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (b): %s' % exp)
value = find_attr_value_('v_spike', node)
if value is not None and 'v_spike' not in already_processed:
already_processed.add('v_spike')
try:
self.v_spike = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (v_spike): %s' % exp)
value = find_attr_value_('tau_w', node)
if value is not None and 'tau_w' not in already_processed:
already_processed.add('tau_w')
try:
self.tau_w = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (tau_w): %s' % exp)
super(EIF_cond_exp_isfa_ista, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(EIF_cond_exp_isfa_ista, self).buildChildren(child_, node, nodeName_, True)
pass
# end class EIF_cond_exp_isfa_ista
class IF_cond_exp(basePyNNIaFCondCell):
subclass = None
superclass = basePyNNIaFCondCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn_I=None, tau_syn_E=None, i_offset=None, cm=None, v_init=None, tau_refrac=None, v_thresh=None, tau_m=None, v_reset=None, v_rest=None, e_rev_I=None, e_rev_E=None):
super(IF_cond_exp, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn_I, tau_syn_E, i_offset, cm, v_init, tau_refrac, v_thresh, tau_m, v_reset, v_rest, e_rev_I, e_rev_E, )
pass
def factory(*args_, **kwargs_):
if IF_cond_exp.subclass:
return IF_cond_exp.subclass(*args_, **kwargs_)
else:
return IF_cond_exp(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(IF_cond_exp, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IF_cond_exp', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IF_cond_exp')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IF_cond_exp'):
super(IF_cond_exp, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IF_cond_exp')
def exportChildren(self, outfile, level, namespace_='', name_='IF_cond_exp', fromsubclass_=False, pretty_print=True):
super(IF_cond_exp, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IF_cond_exp'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(IF_cond_exp, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IF_cond_exp, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(IF_cond_exp, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(IF_cond_exp, self).buildChildren(child_, node, nodeName_, True)
pass
# end class IF_cond_exp
class IF_cond_alpha(basePyNNIaFCondCell):
subclass = None
superclass = basePyNNIaFCondCell
def __init__(self, id=None, neuroLexId=None, name=None, metaid=None, notes=None, annotation=None, tau_syn_I=None, tau_syn_E=None, i_offset=None, cm=None, v_init=None, tau_refrac=None, v_thresh=None, tau_m=None, v_reset=None, v_rest=None, e_rev_I=None, e_rev_E=None):
super(IF_cond_alpha, self).__init__(id, neuroLexId, name, metaid, notes, annotation, tau_syn_I, tau_syn_E, i_offset, cm, v_init, tau_refrac, v_thresh, tau_m, v_reset, v_rest, e_rev_I, e_rev_E, )
pass
def factory(*args_, **kwargs_):
if IF_cond_alpha.subclass:
return IF_cond_alpha.subclass(*args_, **kwargs_)
else:
return IF_cond_alpha(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(IF_cond_alpha, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='IF_cond_alpha', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='IF_cond_alpha')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IF_cond_alpha'):
super(IF_cond_alpha, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IF_cond_alpha')
def exportChildren(self, outfile, level, namespace_='', name_='IF_cond_alpha', fromsubclass_=False, pretty_print=True):
super(IF_cond_alpha, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='IF_cond_alpha'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(IF_cond_alpha, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IF_cond_alpha, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(IF_cond_alpha, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(IF_cond_alpha, self).buildChildren(child_, node, nodeName_, True)
pass
# end class IF_cond_alpha
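# Maps NeuroML XML tag names to the generated classes that can build
# themselves from the corresponding elements; get_root_tag() and the parse*
# helpers below use this table to resolve the document root.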
GDSClassesMapping = {
'intracellularProperties': IntracellularProperties,
'inhomogeneousParam': InhomogeneousParam,
'q10Settings': Q10Settings,
'spikeGenerator': SpikeGenerator,
'distal': DistalDetails,
'random': RandomLayout,
'variableParameter': VariableParameter,
'subTree': SubTree,
'gateHHtauInf': GateHHTauInf,
'inputList': InputList,
'specificCapacitance': ValueAcrossSegOrSegGroup,
'ionChannel': IonChannel,
'gateHHratesTau': GateHHRatesTau,
'biophysicalProperties': BiophysicalProperties,
'membraneProperties': MembraneProperties,
'proximal': ProximalDetails,
'path': Path,
'morphology': Morphology,
'iafCell': IaFCell,
'iafTauRefCell': IaFTauRefCell,
'species': Species,
'resistivity': ValueAcrossSegOrSegGroup,
'member': Member,
'inhomogeneousValue': InhomogeneousValue,
'spikeGeneratorRandom': SpikeGeneratorRandom,
'sineGenerator': SineGenerator,
'expCondSynapse': ExpCondSynapse,
'network': Network,
'reverseRate': HHRate,
'decayingPoolConcentrationModel': DecayingPoolConcentrationModel,
'segment': Segment,
'rampGenerator': RampGenerator,
'cellSet': CellSet,
'gateHHrates': GateHHRates,
'cell': Cell,
'to': SegmentEndPoint,
'voltageClamp': VoltageClamp,
'initMembPotential': ValueAcrossSegOrSegGroup,
'projection': Projection,
'spike': Spike,
'gate': GateHHUndetermined,
'steadyState': HHVariable,
'include': Include,
'forwardRate': HHRate,
'location': Location,
'synapticConnection': SynapticConnection,
'neuroml': NeuroMLDocument,
'from': SegmentEndPoint,
'blockMechanism': BlockMechanism,
'gateHHratesInf': GateHHRatesInf,
'parent': SegmentParent,
'plasticityMechanism': PlasticityMechanism,
'spikeThresh': ValueAcrossSegOrSegGroup,
'annotation': Annotation,
'instance': Instance,
'adExIaFCell': AdExIaFCell,
'grid': GridLayout,
'alphaCondSynapse': AlphaCondSynapse,
'izhikevichCell': IzhikevichCell,
'input': Input,
'iafTauCell': IaFTauCell,
'segmentGroup': SegmentGroup,
'expTwoSynapse': ExpTwoSynapse,
'pulseGenerator': PulseGenerator,
'iafRefCell': IaFRefCell,
'structure': SpaceStructure,
'spikeArray': SpikeArray,
'unstructured': UnstructuredLayout,
'blockingPlasticSynapse': BlockingPlasticSynapse,
'reversalPotential': ReversalPotential,
'channelPopulation': ChannelPopulation,
'alphaCurrSynapse': AlphaCurrSynapse,
'region': Region,
'space': Space,
'expCurrSynapse': ExpCurrSynapse,
'population': Population,
'timeCourse': HHTime,
'explicitInput': ExplicitInput,
'extracellularProperties': ExtracellularPropertiesLocal,
'connection': Connection,
'spikeGeneratorPoisson': SpikeGeneratorPoisson,
'channelDensity': ChannelDensity,
'expOneSynapse': ExpOneSynapse,
'layout': Layout,
'baseCell': BaseCell,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
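# Top-level entry points: each parse* variant reads a document with
# parsexml_(), resolves the root element class via get_root_tag(), builds the
# object tree, and returns the root object (parseEtree() also returns the
# etree element and node mappings, parseString() accepts raw XML text).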
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Annotation'
rootClass = Annotation
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(
## sys.stdout, 0, name_=rootTag,
## namespacedef_='',
## pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Annotation'
rootClass = Annotation
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
## content = etree_.tostring(
## rootElement, pretty_print=True,
## xml_declaration=True, encoding="utf-8")
## sys.stdout.write(content)
## sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
roots = get_root_tag(rootNode)
rootClass = roots[1]
if rootClass is None:
rootClass = Annotation
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(
## sys.stdout, 0, name_="Annotation",
## namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Annotation'
rootClass = Annotation
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('#from generated_neuroml import *\n\n')
## sys.stdout.write('import generated_neuroml as model_\n\n')
## sys.stdout.write('rootObj = model_.rootTag(\n')
## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
## sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
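# Names re-exported on "from <module> import *": the public generated classes.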
__all__ = [
"AdExIaFCell",
"AlphaCondSynapse",
"AlphaCurrSynapse",
"Annotation",
"Base",
"BaseCell",
"BaseConductanceBasedSynapse",
"BasePynnSynapse",
"BaseSynapse",
"BiophysicalProperties",
"BlockMechanism",
"BlockingPlasticSynapse",
"Cell",
"CellSet",
"ChannelDensity",
"ChannelPopulation",
"ComponentType",
"ConcentrationModel_D",
"Connection",
"DecayingPoolConcentrationModel",
"DistalDetails",
"EIF_cond_alpha_isfa_ista",
"EIF_cond_exp_isfa_ista",
"ExpCondSynapse",
"ExpCurrSynapse",
"ExpOneSynapse",
"ExpTwoSynapse",
"ExplicitInput",
"ExtracellularProperties",
"ExtracellularPropertiesLocal",
"GateHHRates",
"GateHHRatesInf",
"GateHHRatesTau",
"GateHHTauInf",
"GateHHUndetermined",
"GridLayout",
"HHRate",
"HHTime",
"HHVariable",
"HH_cond_exp",
"IF_cond_alpha",
"IF_cond_exp",
"IF_curr_alpha",
"IF_curr_exp",
"IaFCell",
"IaFRefCell",
"IaFTauCell",
"IaFTauRefCell",
"Include",
"IncludeType",
"InhomogeneousParam",
"InhomogeneousValue",
"Input",
"InputList",
"Instance",
"IntracellularProperties",
"IonChannel",
"IzhikevichCell",
"Layout",
"Location",
"Member",
"MembraneProperties",
"Morphology",
"Network",
"NeuroMLDocument",
"Path",
"PlasticityMechanism",
"Point3DWithDiam",
"Population",
"Projection",
"ProximalDetails",
"PulseGenerator",
"Q10Settings",
"RampGenerator",
"RandomLayout",
"ReactionScheme",
"Region",
"ReversalPotential",
"Segment",
"SegmentEndPoint",
"SegmentGroup",
"SegmentParent",
"SineGenerator",
"Space",
"SpaceStructure",
"Species",
"Spike",
"SpikeArray",
"SpikeGenerator",
"SpikeGeneratorPoisson",
"SpikeGeneratorRandom",
"SpikeSourcePoisson",
"Standalone",
"SubTree",
"SynapticConnection",
"UnstructuredLayout",
"ValueAcrossSegOrSegGroup",
"VariableParameter",
"VoltageClamp",
"basePyNNCell",
"basePyNNIaFCell",
"basePyNNIaFCondCell"
]
| gpl-2.0 | -853,355,551,470,653,800 | 48.96823 | 951 | 0.623654 | false |
danielsen/arf | test/arf_test.py | 1 | 2110 | #!/usr/bin/env python
# tests for the arf package
import sys
sys.path.insert(0, "../arf/")
import unittest
import arf
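# Exercises arf.load_arf() on a sample abuse report: the top-level message
# headers, every field of the machine-readable feedback report, and the
# embedded original message headers.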
class ARFTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.message = arf.load_arf("./resources/sample_arf_message.txt")
def tearDown(self):
unittest.TestCase.tearDown(self)
def test_header_to_camelcase(self):
self.assertEqual(self.message._header_to_camelcase("Return-Path"),
"ReturnPath")
def test_get_message_headers(self):
message_headers = self.message.get_message_headers()
self.assertEqual(len(message_headers), 8)
received_headers = [i for i in message_headers if 'Received' in i]
self.assertEqual(len(received_headers), 2)
def test_get_feedback_report(self):
feedback_report = self.message.get_feedback_report()
self.assertEqual(feedback_report.get_feedback_type(), "abuse")
self.assertEqual(feedback_report.get_user_agent(),
"SomeGenerator/1.0")
self.assertEqual(feedback_report.get_version(), "1")
self.assertEqual(feedback_report.get_original_mail_from(),
"<[email protected]>")
self.assertEqual(feedback_report.get_original_envelope_id(),
None)
self.assertEqual(feedback_report.get_original_rcpt_to(),
"<[email protected]>")
self.assertEqual(feedback_report.get_arrival_date(),
"Thu, 8 Mar 2005 14:00:00 EDT")
self.assertEqual(feedback_report.get_reporting_mta(),
"dns; mail.example.com")
self.assertEqual(feedback_report.get_source_ip(),
"192.0.2.1")
self.assertEqual(feedback_report.get_reported_domain(),
"example.net")
self.assertEqual(feedback_report.get_reported_uri(),
"http://example.net/earn_money.html")
def test_get_original_message_headers(self):
message_headers = self.message.get_original_message_headers()
self.assertEqual(len(message_headers), 8)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -8,183,541,207,676,271,000 | 36.017544 | 74 | 0.641706 | false |
paulkocialkowski/systemd | test/rule-syntax-check.py | 14 | 2664 | #!/usr/bin/env python3
# Simple udev rules syntax checker
#
# (C) 2010 Canonical Ltd.
# Author: Martin Pitt <[email protected]>
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import os
from glob import glob
if len(sys.argv) > 1:
# explicit rule file list
rules_files = sys.argv[1:]
else:
# take them from the build dir
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
rules_dir = os.path.join(os.environ.get('top_srcdir', root_dir), 'rules')
if not os.path.isdir(rules_dir):
sys.stderr.write('No rules files given, and %s does not exist, aborting' % rules_dir)
sys.exit(2)
rules_files = glob(os.path.join(rules_dir, '*.rules'))
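# A udev rule line is a comma-separated list of clauses; each clause must be
# either a match test (== or !=) or an assignment (=, += or :=). The four
# patterns below cover keys without an argument and keys that take a
# {bracketed} argument.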
no_args_tests = re.compile(r'(ACTION|DEVPATH|KERNELS?|NAME|SYMLINK|SUBSYSTEMS?|DRIVERS?|TAG|RESULT|TEST)\s*(?:=|!)=\s*"([^"]*)"$')
args_tests = re.compile(r'(ATTRS?|ENV|TEST){([a-zA-Z0-9/_.*%-]+)}\s*(?:=|!)=\s*"([^"]*)"$')
no_args_assign = re.compile(r'(NAME|SYMLINK|OWNER|GROUP|MODE|TAG|PROGRAM|RUN|LABEL|GOTO|OPTIONS|IMPORT)\s*(?:\+=|:=|=)\s*"([^"]*)"$')
args_assign = re.compile(r'(ATTR|ENV|IMPORT|RUN){([a-zA-Z0-9/_.*%-]+)}\s*(=|\+=)\s*"([^"]*)"$')
result = 0
buffer = ''
for path in rules_files:
lineno = 0
for line in open(path):
lineno += 1
# handle line continuation
if line.endswith('\\\n'):
buffer += line[:-2]
continue
else:
line = buffer + line
buffer = ''
# filter out comments and empty lines
line = line.strip()
if not line or line.startswith('#'):
continue
for clause in line.split(','):
clause = clause.strip()
if not (no_args_tests.match(clause) or args_tests.match(clause) or
no_args_assign.match(clause) or args_assign.match(clause)):
print('Invalid line %s:%i: %s' % (path, lineno, line))
print(' clause: %s' % clause)
print('')
result = 1
break
sys.exit(result)
| gpl-2.0 | 5,095,719,038,793,636,000 | 35.493151 | 133 | 0.611111 | false |
GDGLima/contentbox | third_party/unidecode/x0d6.py | 4 | 5023 | data = (
'hyeo', # 0x00
'hyeog', # 0x01
'hyeogg', # 0x02
'hyeogs', # 0x03
'hyeon', # 0x04
'hyeonj', # 0x05
'hyeonh', # 0x06
'hyeod', # 0x07
'hyeol', # 0x08
'hyeolg', # 0x09
'hyeolm', # 0x0a
'hyeolb', # 0x0b
'hyeols', # 0x0c
'hyeolt', # 0x0d
'hyeolp', # 0x0e
'hyeolh', # 0x0f
'hyeom', # 0x10
'hyeob', # 0x11
'hyeobs', # 0x12
'hyeos', # 0x13
'hyeoss', # 0x14
'hyeong', # 0x15
'hyeoj', # 0x16
'hyeoc', # 0x17
'hyeok', # 0x18
'hyeot', # 0x19
'hyeop', # 0x1a
'hyeoh', # 0x1b
'hye', # 0x1c
'hyeg', # 0x1d
'hyegg', # 0x1e
'hyegs', # 0x1f
'hyen', # 0x20
'hyenj', # 0x21
'hyenh', # 0x22
'hyed', # 0x23
'hyel', # 0x24
'hyelg', # 0x25
'hyelm', # 0x26
'hyelb', # 0x27
'hyels', # 0x28
'hyelt', # 0x29
'hyelp', # 0x2a
'hyelh', # 0x2b
'hyem', # 0x2c
'hyeb', # 0x2d
'hyebs', # 0x2e
'hyes', # 0x2f
'hyess', # 0x30
'hyeng', # 0x31
'hyej', # 0x32
'hyec', # 0x33
'hyek', # 0x34
'hyet', # 0x35
'hyep', # 0x36
'hyeh', # 0x37
'ho', # 0x38
'hog', # 0x39
'hogg', # 0x3a
'hogs', # 0x3b
'hon', # 0x3c
'honj', # 0x3d
'honh', # 0x3e
'hod', # 0x3f
'hol', # 0x40
'holg', # 0x41
'holm', # 0x42
'holb', # 0x43
'hols', # 0x44
'holt', # 0x45
'holp', # 0x46
'holh', # 0x47
'hom', # 0x48
'hob', # 0x49
'hobs', # 0x4a
'hos', # 0x4b
'hoss', # 0x4c
'hong', # 0x4d
'hoj', # 0x4e
'hoc', # 0x4f
'hok', # 0x50
'hot', # 0x51
'hop', # 0x52
'hoh', # 0x53
'hwa', # 0x54
'hwag', # 0x55
'hwagg', # 0x56
'hwags', # 0x57
'hwan', # 0x58
'hwanj', # 0x59
'hwanh', # 0x5a
'hwad', # 0x5b
'hwal', # 0x5c
'hwalg', # 0x5d
'hwalm', # 0x5e
'hwalb', # 0x5f
'hwals', # 0x60
'hwalt', # 0x61
'hwalp', # 0x62
'hwalh', # 0x63
'hwam', # 0x64
'hwab', # 0x65
'hwabs', # 0x66
'hwas', # 0x67
'hwass', # 0x68
'hwang', # 0x69
'hwaj', # 0x6a
'hwac', # 0x6b
'hwak', # 0x6c
'hwat', # 0x6d
'hwap', # 0x6e
'hwah', # 0x6f
'hwae', # 0x70
'hwaeg', # 0x71
'hwaegg', # 0x72
'hwaegs', # 0x73
'hwaen', # 0x74
'hwaenj', # 0x75
'hwaenh', # 0x76
'hwaed', # 0x77
'hwael', # 0x78
'hwaelg', # 0x79
'hwaelm', # 0x7a
'hwaelb', # 0x7b
'hwaels', # 0x7c
'hwaelt', # 0x7d
'hwaelp', # 0x7e
'hwaelh', # 0x7f
'hwaem', # 0x80
'hwaeb', # 0x81
'hwaebs', # 0x82
'hwaes', # 0x83
'hwaess', # 0x84
'hwaeng', # 0x85
'hwaej', # 0x86
'hwaec', # 0x87
'hwaek', # 0x88
'hwaet', # 0x89
'hwaep', # 0x8a
'hwaeh', # 0x8b
'hoe', # 0x8c
'hoeg', # 0x8d
'hoegg', # 0x8e
'hoegs', # 0x8f
'hoen', # 0x90
'hoenj', # 0x91
'hoenh', # 0x92
'hoed', # 0x93
'hoel', # 0x94
'hoelg', # 0x95
'hoelm', # 0x96
'hoelb', # 0x97
'hoels', # 0x98
'hoelt', # 0x99
'hoelp', # 0x9a
'hoelh', # 0x9b
'hoem', # 0x9c
'hoeb', # 0x9d
'hoebs', # 0x9e
'hoes', # 0x9f
'hoess', # 0xa0
'hoeng', # 0xa1
'hoej', # 0xa2
'hoec', # 0xa3
'hoek', # 0xa4
'hoet', # 0xa5
'hoep', # 0xa6
'hoeh', # 0xa7
'hyo', # 0xa8
'hyog', # 0xa9
'hyogg', # 0xaa
'hyogs', # 0xab
'hyon', # 0xac
'hyonj', # 0xad
'hyonh', # 0xae
'hyod', # 0xaf
'hyol', # 0xb0
'hyolg', # 0xb1
'hyolm', # 0xb2
'hyolb', # 0xb3
'hyols', # 0xb4
'hyolt', # 0xb5
'hyolp', # 0xb6
'hyolh', # 0xb7
'hyom', # 0xb8
'hyob', # 0xb9
'hyobs', # 0xba
'hyos', # 0xbb
'hyoss', # 0xbc
'hyong', # 0xbd
'hyoj', # 0xbe
'hyoc', # 0xbf
'hyok', # 0xc0
'hyot', # 0xc1
'hyop', # 0xc2
'hyoh', # 0xc3
'hu', # 0xc4
'hug', # 0xc5
'hugg', # 0xc6
'hugs', # 0xc7
'hun', # 0xc8
'hunj', # 0xc9
'hunh', # 0xca
'hud', # 0xcb
'hul', # 0xcc
'hulg', # 0xcd
'hulm', # 0xce
'hulb', # 0xcf
'huls', # 0xd0
'hult', # 0xd1
'hulp', # 0xd2
'hulh', # 0xd3
'hum', # 0xd4
'hub', # 0xd5
'hubs', # 0xd6
'hus', # 0xd7
'huss', # 0xd8
'hung', # 0xd9
'huj', # 0xda
'huc', # 0xdb
'huk', # 0xdc
'hut', # 0xdd
'hup', # 0xde
'huh', # 0xdf
'hweo', # 0xe0
'hweog', # 0xe1
'hweogg', # 0xe2
'hweogs', # 0xe3
'hweon', # 0xe4
'hweonj', # 0xe5
'hweonh', # 0xe6
'hweod', # 0xe7
'hweol', # 0xe8
'hweolg', # 0xe9
'hweolm', # 0xea
'hweolb', # 0xeb
'hweols', # 0xec
'hweolt', # 0xed
'hweolp', # 0xee
'hweolh', # 0xef
'hweom', # 0xf0
'hweob', # 0xf1
'hweobs', # 0xf2
'hweos', # 0xf3
'hweoss', # 0xf4
'hweong', # 0xf5
'hweoj', # 0xf6
'hweoc', # 0xf7
'hweok', # 0xf8
'hweot', # 0xf9
'hweop', # 0xfa
'hweoh', # 0xfb
'hwe', # 0xfc
'hweg', # 0xfd
'hwegg', # 0xfe
'hwegs', # 0xff
)
| apache-2.0 | -8,548,412,192,690,056,000 | 17.468992 | 19 | 0.437587 | false |
compatibleone/accords-platform | paprocci/test/to-cosacs/cosacs-monitor.py | 3 | 1493 | import os
import socket
import subprocess
import signal
import sys
import logging
import time
import miscco
import platform
# logging system
curros = platform.platform()
currdir = os.getcwd()
hostname = socket.gethostname()
lockfile = currdir + '/lock-' + hostname
logfile = currdir + '/log-cosacs-wrapper-' + hostname
logger = miscco.init_logger(logfile)
logger.info('----------')
logger.info('The current directory is ' + currdir)
logger.info('The current hostname is ' + hostname)
logger.info('The lockfile is in ' + lockfile)
logger.info('The current platform is ' + curros)
def signal_handler(signum, frame):
    # Trap SIGINT/SIGTERM and refuse to exit; "signum" avoids shadowing the
    # imported signal module.
    logger.warn('Signal %s received: not dying at all.' % signum)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
miscco.touch_file(lockfile)
while 1:
if miscco.is_cosacs_running(): # COSACS IS running
if os.path.exists(lockfile):
# All right
time.sleep(1)
else:
miscco.touch_file(lockfile)
logger.info('Created lockfile.')
else: # COSACS is NOT running
if os.path.exists(lockfile):
os.remove(lockfile)
logger.info('Removed lockfile.')
else:
# All right
time.sleep(1)
logger.info('Created lockfile: ' + lockfile)
logger.info('Waiting for COSACS to die...')
miscco.block_while_cosacs_runs()
logger.info('COSACS died... Removing lockfile: ' + lockfile)
os.remove(lockfile)
logger.info('Done.')
| apache-2.0 | 3,380,356,045,750,076,400 | 25.660714 | 60 | 0.665774 | false |
pinax/pinax-messages | pinax/messages/forms.py | 1 | 2585 | from django import forms
from django.contrib.auth import get_user_model
from .hooks import hookset
from .models import Message
class UserModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return hookset.display_name(obj)
class UserModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
return hookset.display_name(obj)
class NewMessageForm(forms.ModelForm):
subject = forms.CharField()
to_user = UserModelChoiceField(queryset=get_user_model().objects.none())
content = forms.CharField(widget=forms.Textarea)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
self.fields["to_user"].queryset = hookset.get_user_choices(self.user)
if self.initial.get("to_user") is not None:
qs = self.fields["to_user"].queryset.filter(pk=self.initial["to_user"])
self.fields["to_user"].queryset = qs
def save(self, commit=True):
data = self.cleaned_data
return Message.new_message(
self.user, [data["to_user"]], data["subject"], data["content"]
)
class Meta:
model = Message
fields = ["to_user", "subject", "content"]
class NewMessageFormMultiple(forms.ModelForm):
subject = forms.CharField()
to_user = UserModelMultipleChoiceField(get_user_model().objects.none())
content = forms.CharField(widget=forms.Textarea)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
self.fields["to_user"].queryset = hookset.get_user_choices(self.user)
if self.initial.get("to_user") is not None:
qs = self.fields["to_user"].queryset.filter(pk__in=self.initial["to_user"])
self.fields["to_user"].queryset = qs
def save(self, commit=True):
data = self.cleaned_data
return Message.new_message(
self.user, data["to_user"], data["subject"], data["content"]
)
class Meta:
model = Message
fields = ["to_user", "subject", "content"]
class MessageReplyForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.thread = kwargs.pop("thread")
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
def save(self, commit=True):
return Message.new_reply(
self.thread, self.user, self.cleaned_data["content"]
)
class Meta:
model = Message
fields = ["content"]
| mit | -1,233,269,215,145,651,700 | 30.52439 | 87 | 0.624371 | false |
luispedro/milk | milk/supervised/adaboost.py | 2 | 2186 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2012, Luis Pedro Coelho <[email protected]>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
import numpy as np
from .normalise import normaliselabels
from .base import supervised_model
'''
AdaBoost
Simple implementation of Adaboost
Learner
-------
boost_learner
'''
__all__ = [
'boost_learner',
]
def _adaboost(features, labels, base, max_iters):
m = len(features)
D = np.ones(m, dtype=float)
D /= m
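    # D is the AdaBoost weight distribution over the training examples
    # (uniform at the start); Y recodes the 0/1 labels as -1/+1 for the
    # multiplicative weight update below.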
Y = np.ones(len(labels), dtype=float)
names = np.array([-1, +1])
Y = names[labels]
H = []
A = []
for t in range(max_iters):
Ht = base.train(features, labels, weights=D)
train_out = np.array(list(map(Ht.apply, features)))
train_out = names[train_out.astype(int)]
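        # Et is the weighted training error of this round's weak hypothesis
        # under the current distribution D.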
Et = np.dot(D, (Y != train_out))
if Et > .5:
# early return
break
At = .5 * np.log((1. + Et) / (1. - Et))
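        # Re-weight the examples: correctly classified points are scaled down,
        # misclassified points are scaled up, then D is renormalised.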
D *= np.exp((-At) * Y * train_out)
D /= np.sum(D)
A.append(At)
H.append(Ht)
return H, A
class boost_model(supervised_model):
def __init__(self, H, A, names):
self.H = H
self.A = A
self.names = names
def apply(self, f):
v = sum((a*h.apply(f)) for h,a in zip(self.H, self.A))
v /= np.sum(self.A)
return self.names[v > .5]
class boost_learner(object):
'''
learner = boost_learner(weak_learner_type(), max_iters=100)
model = learner.train(features, labels)
test = model.apply(f)
AdaBoost learner
Attributes
----------
base : learner
Weak learner
max_iters : integer
Nr of iterations (default: 100)
'''
def __init__(self, base, max_iters=100):
self.base = base
self.max_iters = max_iters
def train(self, features, labels, normalisedlabels=False, names=(0,1), weights=None, **kwargs):
if not normalisedlabels:
labels,names = normaliselabels(labels)
H,A = _adaboost(features, labels, self.base, self.max_iters)
return boost_model(H, A, names)
| mit | 4,411,267,599,867,851,300 | 24.126437 | 99 | 0.575023 | false |
messagebird/python-rest-api | messagebird/conversation_contact.py | 1 | 1043 | from messagebird.base import Base
from messagebird.contact import CustomDetails
class ConversationContact(Base):
def __init__(self):
self.id = None
self.href = None
self.msisdn = None
self.firstName = None
self.lastName = None
self._customDetails = None
self._createdDatetime = None
self._updatedDatetime = None
@property
def customDetails(self):
return self._customDetails
@customDetails.setter
def customDetails(self, value):
self._customDetails = CustomDetails().load(value)
@property
def createdDatetime(self):
return self._createdDatetime
@createdDatetime.setter
def createdDatetime(self, value):
self._createdDatetime = self.value_to_time(value, '%Y-%m-%dT%H:%M:%SZ')
@property
def updatedDatetime(self):
return self._updatedDatetime
@updatedDatetime.setter
def updatedDatetime(self, value):
self._updatedDatetime = self.value_to_time(value, '%Y-%m-%dT%H:%M:%SZ')
| bsd-2-clause | -7,907,751,996,086,822,000 | 25.74359 | 79 | 0.651007 | false |
aitoralmeida/networkx | networkx/algorithms/connectivity/tests/test_connectivity.py | 2 | 13850 | import itertools
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
import networkx as nx
from networkx.algorithms.flow import (edmonds_karp, preflow_push,
shortest_augmenting_path)
flow_funcs = [edmonds_karp, preflow_push, shortest_augmenting_path]
# connectivity functions not imported to the base namespace
from networkx.algorithms.connectivity import (local_edge_connectivity,
local_node_connectivity)
msg = "Assertion failed in function: {0}"
# helper functions for tests
def _generate_no_biconnected(max_attempts=50):
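    # Yield random graphs that are connected but not biconnected; raise after
    # max_attempts consecutive failures to find a suitable one.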
attempts = 0
while True:
G = nx.fast_gnp_random_graph(100, 0.0575)
if nx.is_connected(G) and not nx.is_biconnected(G):
attempts = 0
yield G
else:
if attempts >= max_attempts:
msg = "Tried %d times: no suitable Graph."
raise Exception(msg % max_attempts)
else:
attempts += 1
def test_average_connectivity():
# figure 1 from:
# Beineke, L., O. Oellermann, and R. Pippert (2002). The average
# connectivity of a graph. Discrete mathematics 252(1-3), 31-45
# http://www.sciencedirect.com/science/article/pii/S0012365X01001807
G1 = nx.path_graph(3)
G1.add_edges_from([(1, 3),(1, 4)])
G2 = nx.path_graph(3)
G2.add_edges_from([(1, 3),(1, 4),(0, 3),(0, 4),(3, 4)])
G3 = nx.Graph()
for flow_func in flow_funcs:
kwargs = dict(flow_func=flow_func)
assert_equal(nx.average_node_connectivity(G1, **kwargs), 1,
msg=msg.format(flow_func.__name__))
assert_equal(nx.average_node_connectivity(G2, **kwargs), 2.2,
msg=msg.format(flow_func.__name__))
assert_equal(nx.average_node_connectivity(G3, **kwargs), 0,
msg=msg.format(flow_func.__name__))
def test_average_connectivity_directed():
G = nx.DiGraph([(1,3),(1,4),(1,5)])
for flow_func in flow_funcs:
assert_equal(nx.average_node_connectivity(G), 0.25,
msg=msg.format(flow_func.__name__))
def test_articulation_points():
Ggen = _generate_no_biconnected()
for flow_func in flow_funcs:
for i in range(3):
G = next(Ggen)
assert_equal(nx.node_connectivity(G, flow_func=flow_func), 1,
msg=msg.format(flow_func.__name__))
def test_brandes_erlebach():
# Figure 1 chapter 7: Connectivity
# http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
G = nx.Graph()
G.add_edges_from([(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 6), (3, 4),
(3, 6), (4, 6), (4, 7), (5, 7), (6, 8), (6, 9), (7, 8),
(7, 10), (8, 11), (9, 10), (9, 11), (10, 11)])
for flow_func in flow_funcs:
kwargs = dict(flow_func=flow_func)
assert_equal(3, local_edge_connectivity(G, 1, 11, **kwargs),
msg=msg.format(flow_func.__name__))
assert_equal(3, nx.edge_connectivity(G, 1, 11, **kwargs),
msg=msg.format(flow_func.__name__))
assert_equal(2, local_node_connectivity(G, 1, 11, **kwargs),
msg=msg.format(flow_func.__name__))
assert_equal(2, nx.node_connectivity(G, 1, 11, **kwargs),
msg=msg.format(flow_func.__name__))
assert_equal(2, nx.edge_connectivity(G, **kwargs), # node 5 has degree 2
msg=msg.format(flow_func.__name__))
assert_equal(2, nx.node_connectivity(G, **kwargs),
msg=msg.format(flow_func.__name__))
def test_white_harary_1():
# Figure 1b white and harary (2001)
# # http://eclectic.ss.uci.edu/~drwhite/sm-w23.PDF
# A graph with high adhesion (edge connectivity) and low cohesion
# (vertex connectivity)
G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
G.remove_node(7)
for i in range(4, 7):
G.add_edge(0, i)
G = nx.disjoint_union(G, nx.complete_graph(4))
G.remove_node(G.order() - 1)
for i in range(7, 10):
G.add_edge(0, i)
for flow_func in flow_funcs:
assert_equal(1, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(3, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_white_harary_2():
# Figure 8 white and harary (2001)
# # http://eclectic.ss.uci.edu/~drwhite/sm-w23.PDF
G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
G.add_edge(0, 4)
# kappa <= lambda <= delta
assert_equal(3, min(nx.core_number(G).values()))
for flow_func in flow_funcs:
assert_equal(1, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(1, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_complete_graphs():
for n in range(5, 20, 5):
for flow_func in flow_funcs:
G = nx.complete_graph(n)
assert_equal(n-1, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(n-1, nx.node_connectivity(G.to_directed(),
flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(n-1, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(n-1, nx.edge_connectivity(G.to_directed(),
flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_empty_graphs():
for k in range(5, 25, 5):
G = nx.empty_graph(k)
for flow_func in flow_funcs:
assert_equal(0, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(0, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_petersen():
G = nx.petersen_graph()
for flow_func in flow_funcs:
assert_equal(3, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(3, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_tutte():
G = nx.tutte_graph()
for flow_func in flow_funcs:
assert_equal(3, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(3, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_dodecahedral():
G = nx.dodecahedral_graph()
for flow_func in flow_funcs:
assert_equal(3, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(3, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_octahedral():
G=nx.octahedral_graph()
for flow_func in flow_funcs:
assert_equal(4, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(4, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_icosahedral():
G=nx.icosahedral_graph()
for flow_func in flow_funcs:
assert_equal(5, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(5, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_missing_source():
G = nx.path_graph(4)
for flow_func in flow_funcs:
assert_raises(nx.NetworkXError, nx.node_connectivity, G, 10, 1,
flow_func=flow_func)
def test_missing_target():
G = nx.path_graph(4)
for flow_func in flow_funcs:
assert_raises(nx.NetworkXError, nx.node_connectivity, G, 1, 10,
flow_func=flow_func)
def test_edge_missing_source():
G = nx.path_graph(4)
for flow_func in flow_funcs:
assert_raises(nx.NetworkXError, nx.edge_connectivity, G, 10, 1,
flow_func=flow_func)
def test_edge_missing_target():
G = nx.path_graph(4)
for flow_func in flow_funcs:
assert_raises(nx.NetworkXError, nx.edge_connectivity, G, 1, 10,
flow_func=flow_func)
def test_not_weakly_connected():
G = nx.DiGraph()
G.add_path([1, 2, 3])
G.add_path([4, 5])
for flow_func in flow_funcs:
assert_equal(nx.node_connectivity(G), 0,
msg=msg.format(flow_func.__name__))
assert_equal(nx.edge_connectivity(G), 0,
msg=msg.format(flow_func.__name__))
def test_not_connected():
G = nx.Graph()
G.add_path([1, 2, 3])
G.add_path([4, 5])
for flow_func in flow_funcs:
assert_equal(nx.node_connectivity(G), 0,
msg=msg.format(flow_func.__name__))
assert_equal(nx.edge_connectivity(G), 0,
msg=msg.format(flow_func.__name__))
def test_directed_edge_connectivity():
G = nx.cycle_graph(10, create_using=nx.DiGraph()) # only one direction
D = nx.cycle_graph(10).to_directed() # 2 reciprocal edges
for flow_func in flow_funcs:
assert_equal(1, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(1, local_edge_connectivity(G, 1, 4, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(1, nx.edge_connectivity(G, 1, 4, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(2, nx.edge_connectivity(D, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(2, local_edge_connectivity(D, 1, 4, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(2, nx.edge_connectivity(D, 1, 4, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_cutoff():
G = nx.complete_graph(5)
for local_func in [local_edge_connectivity, local_node_connectivity]:
for flow_func in flow_funcs:
if flow_func is preflow_push:
# cutoff is not supported by preflow_push
continue
for cutoff in [3, 2, 1]:
result = local_func(G, 0, 4, flow_func=flow_func, cutoff=cutoff)
assert_equal(cutoff, result,
msg="cutoff error in {0}".format(flow_func.__name__))
def test_invalid_auxiliary():
G = nx.complete_graph(5)
assert_raises(nx.NetworkXError, local_node_connectivity, G, 0, 3,
auxiliary=G)
def test_interface_only_source():
G = nx.complete_graph(5)
for interface_func in [nx.node_connectivity, nx.edge_connectivity]:
assert_raises(nx.NetworkXError, interface_func, G, s=0)
def test_interface_only_target():
G = nx.complete_graph(5)
for interface_func in [nx.node_connectivity, nx.edge_connectivity]:
assert_raises(nx.NetworkXError, interface_func, G, t=3)
def test_edge_connectivity_flow_vs_stoer_wagner():
graph_funcs = [
nx.icosahedral_graph,
nx.octahedral_graph,
nx.dodecahedral_graph,
]
for graph_func in graph_funcs:
G = graph_func()
assert_equal(nx.stoer_wagner(G)[0], nx.edge_connectivity(G))
class TestConnectivityPairs(object):
def test_all_pairs_connectivity_icosahedral(self):
G = nx.icosahedral_graph()
C = nx.all_pairs_node_connectivity(G)
assert_true(all(5 == C[u][v] for u, v in itertools.combinations(G, 2)))
def test_all_pairs_connectivity(self):
G = nx.Graph()
nodes = [0, 1, 2, 3]
G.add_path(nodes)
A = dict.fromkeys(G, dict())
for u, v in itertools.combinations(nodes,2):
A[u][v] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G)
assert_equal(sorted((k, sorted(v)) for k, v in A.items()),
sorted((k, sorted(v)) for k, v in C.items()))
def test_all_pairs_connectivity_directed(self):
G = nx.DiGraph()
nodes = [0, 1, 2, 3]
G.add_path(nodes)
A = dict.fromkeys(G, dict())
for u, v in itertools.permutations(nodes, 2):
A[u][v] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G)
assert_equal(sorted((k, sorted(v)) for k, v in A.items()),
sorted((k, sorted(v)) for k, v in C.items()))
def test_all_pairs_connectivity_nbunch(self):
G = nx.complete_graph(5)
nbunch = [0, 2, 3]
A = dict.fromkeys(nbunch, dict())
for u, v in itertools.combinations(nbunch, 2):
A[u][v] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G, nbunch=nbunch)
assert_equal(sorted((k, sorted(v)) for k, v in A.items()),
sorted((k, sorted(v)) for k, v in C.items()))
def test_all_pairs_connectivity_nbunch_iter(self):
G = nx.complete_graph(5)
nbunch = [0, 2, 3]
A = dict.fromkeys(nbunch, dict())
for u, v in itertools.combinations(nbunch, 2):
A[u][v] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G, nbunch=iter(nbunch))
assert_equal(sorted((k, sorted(v)) for k, v in A.items()),
sorted((k, sorted(v)) for k, v in C.items()))
| bsd-3-clause | 4,308,517,269,950,704,600 | 41.097264 | 86 | 0.572924 | false |
fardog/deej | app/deej/wsgi.py | 1 | 1268 | """
WSGI config for utility project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.dirname(__file__))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "utility.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause | -2,057,336,928,713,251,300 | 38.625 | 79 | 0.791798 | false |
antoyo/qutebrowser | tests/unit/commands/test_userscripts.py | 4 | 7179 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import os
import json
import time
import logging
import signal
import pytest
from PyQt5.QtCore import QFileSystemWatcher
from qutebrowser.commands import userscripts
@pytest.fixture(autouse=True)
def guiprocess_message_mock(message_mock):
message_mock.patch('qutebrowser.misc.guiprocess.message')
return message_mock
@pytest.mark.posix
class TestQtFIFOReader:
@pytest.yield_fixture
def reader(self, tmpdir, qapp):
fifo_path = str(tmpdir / 'fifo')
os.mkfifo(fifo_path) # pylint: disable=no-member,useless-suppression
reader = userscripts._QtFIFOReader(fifo_path)
yield reader
if reader._notifier.isEnabled():
reader.cleanup()
def test_single_line(self, reader, qtbot):
"""Test QSocketNotifier with a single line of data."""
with qtbot.waitSignal(reader.got_line) as blocker:
with open(reader._filepath, 'w', encoding='utf-8') as f:
f.write('foobar\n')
assert blocker.args == ['foobar']
def test_cleanup(self, reader):
assert not reader._fifo.closed
reader.cleanup()
assert reader._fifo.closed
@pytest.fixture(params=[
userscripts._POSIXUserscriptRunner,
userscripts._WindowsUserscriptRunner,
])
def runner(request):
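    # Parametrised over both userscript runner implementations; the POSIX
    # runner is skipped when the tests are not running on a POSIX platform.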
if (os.name != 'posix' and
request.param is userscripts._POSIXUserscriptRunner):
pytest.skip("Requires a POSIX os")
else:
return request.param(0)
def test_command(qtbot, py_proc, runner):
cmd, args = py_proc(r"""
import os
with open(os.environ['QUTE_FIFO'], 'w') as f:
f.write('foo\n')
""")
with qtbot.waitSignal(runner.got_cmd, timeout=10000) as blocker:
runner.prepare_run(cmd, *args)
runner.store_html('')
runner.store_text('')
assert blocker.args == ['foo']
def test_custom_env(qtbot, monkeypatch, py_proc, runner):
monkeypatch.setenv('QUTEBROWSER_TEST_1', '1')
env = {'QUTEBROWSER_TEST_2': '2'}
cmd, args = py_proc(r"""
import os
import json
env = dict(os.environ)
with open(os.environ['QUTE_FIFO'], 'w') as f:
json.dump(env, f)
f.write('\n')
""")
with qtbot.waitSignal(runner.got_cmd, timeout=10000) as blocker:
runner.prepare_run(cmd, *args, env=env)
runner.store_html('')
runner.store_text('')
data = blocker.args[0]
ret_env = json.loads(data)
assert 'QUTEBROWSER_TEST_1' in ret_env
assert 'QUTEBROWSER_TEST_2' in ret_env
def test_source(qtbot, py_proc, runner):
"""Make sure the page source is read and cleaned up correctly."""
cmd, args = py_proc(r"""
import os
import json
data = {
'html_file': os.environ['QUTE_HTML'],
'text_file': os.environ['QUTE_TEXT'],
}
with open(os.environ['QUTE_HTML'], 'r') as f:
data['html'] = f.read()
with open(os.environ['QUTE_TEXT'], 'r') as f:
data['text'] = f.read()
with open(os.environ['QUTE_FIFO'], 'w') as f:
json.dump(data, f)
f.write('\n')
""")
with qtbot.waitSignal(runner.finished, timeout=10000):
with qtbot.waitSignal(runner.got_cmd, timeout=10000) as blocker:
runner.prepare_run(cmd, *args)
runner.store_html('This is HTML')
runner.store_text('This is text')
data = blocker.args[0]
parsed = json.loads(data)
assert parsed['text'] == 'This is text'
assert parsed['html'] == 'This is HTML'
assert not os.path.exists(parsed['text_file'])
assert not os.path.exists(parsed['html_file'])
def test_command_with_error(qtbot, py_proc, runner):
cmd, args = py_proc(r"""
import sys, os, json
with open(os.environ['QUTE_FIFO'], 'w') as f:
json.dump(os.environ['QUTE_TEXT'], f)
f.write('\n')
sys.exit(1)
""")
with qtbot.waitSignal(runner.finished, timeout=10000):
with qtbot.waitSignal(runner.got_cmd, timeout=10000) as blocker:
runner.prepare_run(cmd, *args)
runner.store_text('Hello World')
runner.store_html('')
data = json.loads(blocker.args[0])
assert not os.path.exists(data)
def test_killed_command(qtbot, tmpdir, py_proc, runner):
data_file = tmpdir / 'data'
watcher = QFileSystemWatcher()
watcher.addPath(str(tmpdir))
cmd, args = py_proc(r"""
import os
import time
import sys
import json
data = {
'pid': os.getpid(),
'text_file': os.environ['QUTE_TEXT'],
}
# We can't use QUTE_FIFO to transmit the PID because that wouldn't work
# on Windows, where QUTE_FIFO is only monitored after the script has
# exited.
with open(sys.argv[1], 'w') as f:
json.dump(data, f)
time.sleep(30)
""")
args.append(str(data_file))
with qtbot.waitSignal(watcher.directoryChanged, timeout=10000):
runner.prepare_run(cmd, *args)
runner.store_text('Hello World')
runner.store_html('')
# Make sure the PID was written to the file, not just the file created
time.sleep(0.5)
data = json.load(data_file)
with qtbot.waitSignal(runner.finished):
os.kill(int(data['pid']), signal.SIGTERM)
assert not os.path.exists(data['text_file'])
def test_temporary_files_failed_cleanup(caplog, qtbot, py_proc, runner):
"""Delete a temporary file from the script so cleanup fails."""
cmd, args = py_proc(r"""
import os
os.remove(os.environ['QUTE_HTML'])
""")
with caplog.at_level(logging.ERROR):
with qtbot.waitSignal(runner.finished, timeout=10000):
runner.prepare_run(cmd, *args)
runner.store_text('')
runner.store_html('')
assert len(caplog.records) == 1
expected = "Failed to delete tempfile"
assert caplog.records[0].message.startswith(expected)
def test_unsupported(monkeypatch, tabbed_browser_stubs):
monkeypatch.setattr(userscripts.os, 'name', 'toaster')
with pytest.raises(userscripts.UnsupportedError) as excinfo:
userscripts.run_async(tab=None, cmd=None, win_id=0, env=None)
expected = "Userscripts are not supported on this platform!"
assert str(excinfo.value) == expected
| gpl-3.0 | -2,509,098,262,350,676,500 | 29.037657 | 79 | 0.627525 | false |
e-dorigatti/pyspider | pyspider/processor/project_module.py | 6 | 7447 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-02-16 22:24:20
import os
import six
import sys
import imp
import time
import weakref
import logging
import inspect
import traceback
import linecache
from pyspider.libs import utils
from pyspider.libs.log import SaveLogHandler, LogFormatter
logger = logging.getLogger("processor")
class ProjectManager(object):
"""
load projects from projectdb, update project
"""
CHECK_PROJECTS_INTERVAL = 5 * 60
RELOAD_PROJECT_INTERVAL = 60 * 60
@staticmethod
def build_module(project, env={}):
'''Build project script as module'''
from pyspider.libs import base_handler
assert 'name' in project, 'need name of project'
assert 'script' in project, 'need script of project'
# fix for old non-package version scripts
pyspider_path = os.path.join(os.path.dirname(__file__), "..")
if pyspider_path not in sys.path:
sys.path.insert(1, pyspider_path)
env = dict(env)
env.update({
'debug': project.get('status', 'DEBUG') == 'DEBUG',
})
loader = ProjectLoader(project)
module = loader.load_module(project['name'])
# logger inject
module.log_buffer = []
module.logging = module.logger = logging.Logger(project['name'])
if env.get('enable_stdout_capture', True):
handler = SaveLogHandler(module.log_buffer)
handler.setFormatter(LogFormatter(color=False))
else:
handler = logging.StreamHandler()
handler.setFormatter(LogFormatter(color=True))
module.logger.addHandler(handler)
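        # Locate the user's handler class: unless the script sets
        # __handler_cls__ explicitly, scan the module for a subclass of
        # BaseHandler and use it.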
if '__handler_cls__' not in module.__dict__:
BaseHandler = module.__dict__.get('BaseHandler', base_handler.BaseHandler)
for each in list(six.itervalues(module.__dict__)):
if inspect.isclass(each) and each is not BaseHandler \
and issubclass(each, BaseHandler):
module.__dict__['__handler_cls__'] = each
_class = module.__dict__.get('__handler_cls__')
assert _class is not None, "need BaseHandler in project module"
instance = _class()
instance.__env__ = env
instance.project_name = project['name']
instance.project = project
return {
'loader': loader,
'module': module,
'class': _class,
'instance': instance,
'exception': None,
'exception_log': '',
'info': project,
'load_time': time.time(),
}
def __init__(self, projectdb, env):
self.projectdb = projectdb
self.env = env
self.projects = {}
self.last_check_projects = time.time()
def _need_update(self, project_name, updatetime=None, md5sum=None):
'''Check if project_name need update'''
if project_name not in self.projects:
return True
elif md5sum and md5sum != self.projects[project_name]['info'].get('md5sum'):
return True
elif updatetime and updatetime > self.projects[project_name]['info'].get('updatetime', 0):
return True
elif time.time() - self.projects[project_name]['load_time'] > self.RELOAD_PROJECT_INTERVAL:
return True
return False
def _check_projects(self):
'''Check projects by last update time'''
for project in self.projectdb.check_update(self.last_check_projects,
['name', 'updatetime']):
if project['name'] not in self.projects:
continue
if project['updatetime'] > self.projects[project['name']]['info'].get('updatetime', 0):
self._update_project(project['name'])
self.last_check_projects = time.time()
def _update_project(self, project_name):
'''Update one project from database'''
project = self.projectdb.get(project_name)
if not project:
return None
return self._load_project(project)
def _load_project(self, project):
'''Load project into self.projects from project info dict'''
try:
project['md5sum'] = utils.md5string(project['script'])
ret = self.build_module(project, self.env)
self.projects[project['name']] = ret
except Exception as e:
logger.exception("load project %s error", project.get('name', None))
ret = {
'loader': None,
'module': None,
'class': None,
'instance': None,
'exception': e,
'exception_log': traceback.format_exc(),
'info': project,
'load_time': time.time(),
}
self.projects[project['name']] = ret
return False
logger.debug('project: %s updated.', project.get('name', None))
return True
def get(self, project_name, updatetime=None, md5sum=None):
'''get project data object, return None if not exists'''
if time.time() - self.last_check_projects > self.CHECK_PROJECTS_INTERVAL:
self._check_projects()
if self._need_update(project_name, updatetime, md5sum):
self._update_project(project_name)
return self.projects.get(project_name, None)
class ProjectFinder(object):
'''ProjectFinder class for sys.meta_path'''
def __init__(self, projectdb):
self.get_projectdb = weakref.ref(projectdb)
@property
def projectdb(self):
return self.get_projectdb()
def find_module(self, fullname, path=None):
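        # 'projects' itself is served as a virtual package; 'projects.<name>'
        # is looked up in the project database and loaded from its script.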
if fullname == 'projects':
return self
parts = fullname.split('.')
if len(parts) == 2 and parts[0] == 'projects':
name = parts[1]
if not self.projectdb:
return
info = self.projectdb.get(name)
if info:
return ProjectLoader(info)
def load_module(self, fullname):
mod = imp.new_module(fullname)
mod.__file__ = '<projects>'
mod.__loader__ = self
mod.__path__ = ['<projects>']
mod.__package__ = 'projects'
return mod
def is_package(self, fullname):
return True
class ProjectLoader(object):
'''ProjectLoader class for sys.meta_path'''
def __init__(self, project, mod=None):
self.project = project
self.name = project['name']
self.mod = mod
def load_module(self, fullname):
if self.mod is None:
self.mod = mod = imp.new_module(fullname)
else:
mod = self.mod
mod.__file__ = '<%s>' % self.name
mod.__loader__ = self
mod.__project__ = self.project
mod.__package__ = ''
code = self.get_code(fullname)
six.exec_(code, mod.__dict__)
linecache.clearcache()
return mod
def is_package(self, fullname):
return False
def get_code(self, fullname):
return compile(self.get_source(fullname), '<%s>' % self.name, 'exec')
def get_source(self, fullname):
script = self.project['script']
if isinstance(script, six.text_type):
return script.encode('utf8')
return script
| apache-2.0 | 2,032,686,362,230,016,300 | 32.696833 | 99 | 0.570297 | false |
ScreamingUdder/mantid | Testing/SystemTests/tests/analysis/CompressEvents.py | 1 | 1888 | #pylint: disable=no-init,invalid-name,attribute-defined-outside-init
import stresstesting
from mantid.simpleapi import *
class CompressEventsTesting(stresstesting.MantidStressTest):
event_files = ["PG3_4844_event.nxs"] # /SNS/PG3/IPTS-2767/0/ for 2.5 hours
def requiredFiles(self):
return self.event_files
def runTest(self):
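        # Compressing with different wall-clock tolerances must not change the
        # integrated number of events.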
for filename in self.event_files:
wkspname = filename.split('.')[0]
outname = wkspname + '_out'
LoadEventNexus(Filename=filename, OutputWorkspace=wkspname, LoadMonitors=False)
totalEvents = mtd[wkspname].getNumberEvents()
SumSpectra(InputWorkspace=wkspname, OutputWorkspace=wkspname)
# max for Integration algorithm is not inclusive
for name in (outname, wkspname): # first out of place, then in place
CompressEvents(InputWorkspace=wkspname, OutputWorkspace=name,
WallClockTolerance=10.)
integral = Integration(InputWorkspace=name, RangeUpper=20000.)
compress10s = integral.readY(0)[0]
CompressEvents(InputWorkspace=wkspname, OutputWorkspace=name,
WallClockTolerance=3600.)
integral = Integration(InputWorkspace=name, RangeUpper=20000.)
compress1h = integral.readY(0)[0]
CompressEvents(InputWorkspace=wkspname, OutputWorkspace=name)
integral = Integration(InputWorkspace=name, RangeUpper=20000.)
compressfull = integral.readY(0)[0]
if not (totalEvents == compress10s == compress1h == compressfull):
# TODO use new style formatting
msg = '%s - total=%f 10s=%f 1h=%f full=%f' % (name, totalEvents, compress10s, compress1h, compressfull)
raise RuntimeError(msg)
| gpl-3.0 | -6,123,531,465,325,404,000 | 45.04878 | 123 | 0.627119 | false |
node13h/droll | droll/blog/tests/test_views.py | 1 | 3526 | # Copyright (C) 2017 Sergej Alikov <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest.mock import MagicMock
from django.test import TestCase, RequestFactory
from django.http import Http404
from .. import views
from .factories import PostFactory
class RollViewTestCase(TestCase):
def setUp(self):
self.view = views.RollView.as_view()
def test_unauthenticated_empty(self):
request = RequestFactory().get('/')
request.user = MagicMock()
request.user.is_authenticated.return_value = False
response = self.view(request)
self.assertContains(response, '<html')
def test_template(self):
request = RequestFactory().get('/')
request.user = MagicMock()
request.user.is_authenticated.return_value = False
PostFactory(body='POST-BODY-MARKER', public=True)
response = self.view(request)
self.assertContains(response, 'POST-BODY-MARKER')
def test_public(self):
request = RequestFactory().get('/')
request.user = MagicMock()
request.user.is_authenticated.return_value = False
PostFactory(body='POST1-BODY-MARKER', public=True)
PostFactory(body='POST2-BODY-MARKER')
response = self.view(request)
posts = response.context_data['object_list']
self.assertTrue(posts.filter(body='POST1-BODY-MARKER').exists())
self.assertFalse(posts.filter(body='POST2-BODY-MARKER').exists())
class PostDetailViewTestCase(TestCase):
def setUp(self):
self.view = views.PostDetailView.as_view()
def test_unauthenticated_public(self):
post = PostFactory(body='POST-BODY-MARKER', public=True)
request = RequestFactory().get(post.get_absolute_url())
request.user = MagicMock()
request.user.is_authenticated.return_value = False
response = self.view(request, slug=post.slug)
self.assertContains(response, 'POST-BODY-MARKER')
self.assertEqual(response.context_data['object'], post)
self.assertEqual(response.context_data['post'], post)
def test_unauthenticated_private(self):
post = PostFactory(body='POST-BODY-MARKER', public=False)
request = RequestFactory().get(post.get_absolute_url())
request.user = MagicMock()
request.user.is_authenticated.return_value = False
with self.assertRaises(Http404):
self.view(request, slug=post.slug)
def test_unauthenticated_private_own(self):
post = PostFactory(body='POST-BODY-MARKER', public=False)
request = RequestFactory().get(post.get_absolute_url())
request.user = post.user
response = self.view(request, slug=post.slug)
self.assertContains(response, 'POST-BODY-MARKER')
self.assertEqual(response.context_data['object'], post)
self.assertEqual(response.context_data['post'], post)
| agpl-3.0 | 382,627,207,613,383,740 | 36.115789 | 77 | 0.688599 | false |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/python/keras/preprocessing/image/__init__.py | 4 | 2145 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils for image data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.preprocessing.image import apply_transform
from tensorflow.python.keras._impl.keras.preprocessing.image import array_to_img
from tensorflow.python.keras._impl.keras.preprocessing.image import DirectoryIterator
from tensorflow.python.keras._impl.keras.preprocessing.image import flip_axis
from tensorflow.python.keras._impl.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras._impl.keras.preprocessing.image import img_to_array
from tensorflow.python.keras._impl.keras.preprocessing.image import Iterator
from tensorflow.python.keras._impl.keras.preprocessing.image import load_img
from tensorflow.python.keras._impl.keras.preprocessing.image import NumpyArrayIterator
from tensorflow.python.keras._impl.keras.preprocessing.image import random_brightness
from tensorflow.python.keras._impl.keras.preprocessing.image import random_channel_shift
from tensorflow.python.keras._impl.keras.preprocessing.image import random_rotation
from tensorflow.python.keras._impl.keras.preprocessing.image import random_shear
from tensorflow.python.keras._impl.keras.preprocessing.image import random_shift
from tensorflow.python.keras._impl.keras.preprocessing.image import random_zoom
del absolute_import
del division
del print_function
| mit | 4,021,981,804,133,122,600 | 54 | 88 | 0.788811 | false |
mvdbeek/tools-iuc | tools/metaphlan/customizemetadata.py | 12 | 18949 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import bz2
import json
import pickle
import re
from pathlib import Path
def load_from_json(json_fp):
'''
Read JSON file with marker metadata
:param json_fp: Path to JSON file
'''
with open(json_fp, 'r') as json_f:
data = json.load(json_f)
for m in data['markers']:
data['markers'][m]['ext'] = set(data['markers'][m]['ext'])
for t in data['taxonomy']:
if isinstance(data['taxonomy'][t], list):
data['taxonomy'][t] = tuple(data['taxonomy'][t])
return data
def dump_to_json(data, json_fp):
'''
Dump marker metadata to JSON file
:param json_fp: Path to JSON file
'''
for m in data['markers']:
data['markers'][m]['ext'] = list(data['markers'][m]['ext'])
with open(json_fp, 'w') as json_f:
json.dump(data, json_f)
def transform_pkl_to_json(pkl_fp, json_fp):
'''
Read Pickle file and drop it to a JSON file
:param pkl_fp: Path to input Pickle file
:param json_fp: Path to output JSON file
'''
# load metadata from Pickle file
with bz2.BZ2File(pkl_fp, 'r') as pkl_f:
in_metadata = pickle.load(pkl_f)
out_metadata = {
'markers': in_metadata['markers'],
'taxonomy': in_metadata['taxonomy'],
'merged_taxon': {}
}
# transform merged_taxons tuple keys to string
for k in in_metadata['merged_taxon']:
n = ' , '.join(k)
        out_metadata['merged_taxon'][n] = in_metadata['merged_taxon'][k]
# dump metadata to JSON file
dump_to_json(out_metadata, json_fp)
def transform_json_to_pkl(json_fp, pkl_fp):
'''
Read JSON file and drop it to a Pickle file
:param json_fp: Path to input JSON file
:param pkl_fp: Path to output Pickle file
'''
# load metadata from JSON file
in_metadata = load_from_json(json_fp)
out_metadata = {
'markers': in_metadata['markers'],
'taxonomy': in_metadata['taxonomy'],
'merged_taxon': {}
}
# transform merged_taxons keys to tuple
for k in in_metadata['merged_taxon']:
        n = tuple(k.split(' , '))
        out_metadata['merged_taxon'][n] = in_metadata['merged_taxon'][k]
# dump metadata to Pickle file
with bz2.BZ2File(pkl_fp, 'w') as pkl_f:
pickle.dump(out_metadata, pkl_f)
def add_marker(in_json_fp, out_json_fp, name, m_length, g_length, gca, k_name, k_id, p_name, p_id, c_name, c_id, o_name, o_id, f_name, f_id, g_name, g_id, s_name, s_id, t_name):
'''
Add marker to JSON file
:param in_json_fp: Path to input JSON file
:param out_json_fp: Path to output JSON file
:param name: Name of new marker
:param m_length: Length of new marker
:param g_length: List with lengths of genomes from which the new marker has been extracted
:param gca: List with GCA of genomes from which the new marker has been extracted
:param k_name: List with Name of Kingdom for genomes from which the new marker has been extracted
:param k_id: List with NCBI id of Kingdom for genomes from which the new marker has been extracted
:param p_name: List with Name of Phylum for genomes from which the new marker has been extracted
:param p_id: List with NCBI id of Phylum for genomes from which the new marker has been extracted
:param c_name: List with Name of Class for genomes from which the new marker has been extracted
:param c_id: List with NCBI id of Class for genomes from which the new marker has been extracted
:param o_name: List with Name of Order for genomes from which the new marker has been extracted
:param o_id: List with NCBI id of Order for genomes from which the new marker has been extracted
:param f_name: List with Name of Family for genomes from which the new marker has been extracted
:param f_id: List with NCBI id of Family for genomes from which the new marker has been extracted
:param g_name: List with Name of Genus for genomes from which the new marker has been extracted
:param g_id: List with NCBI id of Genus for genomes from which the new marker has been extracted
:param s_name: List with Name of Species for genomes from which the new marker has been extracted
:param s_id: List with NCBI id of Species for genomes from which the new marker has been extracted
:param t_name: List with Name of Strain for genomes from which the new marker has been extracted
'''
metadata = load_from_json(in_json_fp)
# check that all lists have same size
genome_n = len(g_length)
if len(gca) != genome_n:
raise ValueError("Missing/Extra values in GCA list")
if len(k_name) != genome_n:
raise ValueError("Missing/Extra values in Kingdom name list")
if len(k_id) != genome_n:
raise ValueError("Missing/Extra values in Kingdom ID list")
if len(p_name) != genome_n:
raise ValueError("Missing/Extra values in Phylum name list")
if len(p_id) != genome_n:
raise ValueError("Missing/Extra values in Phylum ID list")
if len(c_name) != genome_n:
raise ValueError("Missing/Extra values in Class name list")
if len(c_id) != genome_n:
raise ValueError("Missing/Extra values in Class ID list")
if len(o_name) != genome_n:
raise ValueError("Missing/Extra values in Order name list")
if len(o_id) != genome_n:
raise ValueError("Missing/Extra values in Order ID list")
if len(f_name) != genome_n:
raise ValueError("Missing/Extra values in Family name list")
if len(f_id) != genome_n:
raise ValueError("Missing/Extra values in Family ID list")
if len(g_name) != genome_n:
raise ValueError("Missing/Extra values in Genus name list")
if len(g_id) != genome_n:
raise ValueError("Missing/Extra values in Genus ID list")
if len(s_name) != genome_n:
raise ValueError("Missing/Extra values in Species name list")
if len(s_id) != genome_n:
raise ValueError("Missing/Extra values in Species ID list")
if len(t_name) != genome_n:
raise ValueError("Missing/Extra values in Strain name list")
# create dictionary to aggregate genome taxonomies and identify marker taxonomy
taxonomy = {
'k': set(),
'p': set(),
'c': set(),
'o': set(),
'f': set(),
'g': set(),
's': set(),
't': set(),
}
# parse genomes
for i in range(genome_n):
# add taxonomy of new genome
g_taxo_names = "k__%s|p__%s|c__%s|o__%s|f__%s|g__%s|s__%s|t__%s" % (
k_name[i],
p_name[i],
c_name[i],
o_name[i],
f_name[i],
g_name[i],
s_name[i],
t_name[i]
)
g_taxo_ids = "%s|%s|%s|%s|%s|%s|%s" % (
k_id[i],
p_id[i],
c_id[i],
o_id[i],
f_id[i],
g_id[i],
s_id[i]
)
metadata['taxonomy'][g_taxo_names] = (g_taxo_ids, g_length[i])
# aggregate taxon levels using sets
taxonomy['k'].add(k_name[i])
taxonomy['p'].add(p_name[i])
taxonomy['c'].add(c_name[i])
taxonomy['o'].add(o_name[i])
taxonomy['f'].add(f_name[i])
taxonomy['g'].add(g_name[i])
taxonomy['s'].add(s_name[i])
taxonomy['t'].add(t_name[i])
# extract clade and taxon of marker
    clade = '' # last level before the genome taxonomies diverge
taxon = '' # combination of levels before divergence
for level in ['k', 'p', 'c', 'o', 'f', 'g', 's', 't']:
taxo = list(taxonomy[level])
if len(taxo) == 1:
clade = taxo[0]
taxon = "%s|%s__%s" % (taxon, level, taxo)
# add information about the new marker
metadata['markers'][name] = {
'clade': clade,
'ext': set(gca),
'len': m_length,
'taxon': taxon
}
dump_to_json(metadata, out_json_fp)
def format_markers(marker_l):
'''
Format markers
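    Trailing whitespace is stripped and only the part before the first space is kept.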
:param marker_l: list of markers
'''
markers = []
for m in marker_l:
m = m.rstrip()
if ' ' in m:
markers.append(m.split(' ')[0])
else:
markers.append(m)
return markers
def get_markers(marker_fp):
'''
Get markers from a file
:param marker_fp: Path to file with markers (1 per line)
'''
# load markers
with open(marker_fp, 'r') as marker_f:
markers = marker_f.readlines()
# format markers
markers = format_markers(markers)
return markers
def check_not_found_markers(found_markers, original_markers):
'''
Check list of markers
:param found_markers: list of found markers
:param original_markers: list of original markers
'''
if len(found_markers) != len(original_markers):
print('markers not found:')
for m in original_markers:
if m not in found_markers:
print('- "%s"' % m)
def prune_taxonomy(in_taxonomy, taxon_s, gca_s):
'''
Prune taxonomy to keep only listed taxonomy
:param in_taxonomy: dictionary with list of taxonomy
:param taxon_s: set of taxons to keep
:param gca_s: set of GCA ids to keep
'''
out_taxonomy = {}
kept_taxonomy = set()
kept_taxons = set()
kept_gca = set()
for t, v in in_taxonomy.items():
# check if t match element in list of taxon_s
kept_taxon = False
for t_k in taxon_s:
if t_k in t:
kept_taxon = True
out_taxonomy[t] = v
kept_taxonomy.add(t)
kept_taxons.add(t_k)
break
# check if GCA in the taxon id
s = re.search(r'GCA_\d+$', t)
if s:
gca = s[0]
# check if GCA in taxon id is in the list GCA to keep
if gca in gca_s:
kept_gca.add(gca)
if not kept_taxon:
out_taxonomy[t] = v
kept_taxonomy.add(t)
print('%s kept taxonomy' % len(kept_taxonomy))
print('%s / %s taxons not found' % (len(taxon_s) - len(kept_taxons), len(taxon_s)))
print('%s / %s GCA taxons not found' % (len(gca_s) - len(kept_gca), len(gca_s)))
return out_taxonomy
def remove_markers(in_json_fp, marker_fp, out_json_fp, kept_marker_fp):
'''
Remove markers from JSON file
:param in_json_fp: Path to input JSON file
:param marker_fp: Path to file with markers to remove (1 per line)
:param out_json_fp: Path to output JSON file
:param kept_marker_fp: Path to file with kept markers
'''
in_metadata = load_from_json(in_json_fp)
# load markers
markers_to_remove = set(get_markers(marker_fp))
print('%s markers to remove' % len(markers_to_remove))
# keep merged_taxon
out_metadata = {
'markers': {},
'taxonomy': {},
'merged_taxon': in_metadata['merged_taxon']
}
# parse markers to keep
removed_markers = []
kept_markers = []
taxons_to_keep = set()
gca_to_keep = set()
for m, v in in_metadata['markers'].items():
if m not in markers_to_remove:
out_metadata['markers'][m] = v
kept_markers.append(m)
taxons_to_keep.add(v['taxon'])
gca_to_keep.update(v['ext'])
else:
removed_markers.append(m)
print('%s removed markers' % len(removed_markers))
# check markers that are not found
check_not_found_markers(removed_markers, markers_to_remove)
# keep only taxonomy in taxons_to_keep or with GCA in gca_to_keep
out_metadata['taxonomy'] = prune_taxonomy(in_metadata['taxonomy'], taxons_to_keep, gca_to_keep)
# save to JSON
dump_to_json(out_metadata, out_json_fp)
# write list of kept markers
with open(kept_marker_fp, 'w') as kept_marker_f:
for m in kept_markers:
kept_marker_f.write("%s\n" % m)
def keep_markers(in_json_fp, marker_fp, out_json_fp):
'''
Keep markers from JSON file, others will be removed
:param in_json_fp: Path to input JSON file
:param marker_fp: Path to file with markers to keep (1 per line)
:param out_json_fp: Path to output JSON file
'''
in_metadata = load_from_json(in_json_fp)
# load markers
markers_to_keep = set(get_markers(marker_fp))
print('%s markers to keep' % len(markers_to_keep))
# keep merged_taxon
out_metadata = {
'markers': {},
'taxonomy': {},
'merged_taxon': in_metadata['merged_taxon']
}
# parse markers to keep
kept_markers = []
taxons_to_keep = set()
gca_to_keep = set()
for m, v in in_metadata['markers'].items():
if m in markers_to_keep:
out_metadata['markers'][m] = v
kept_markers.append(m)
taxons_to_keep.add(v['taxon'])
gca_to_keep.update(v['ext'])
print('%s kept markers' % len(kept_markers))
# check markers that are not found
check_not_found_markers(kept_markers, markers_to_keep)
# keep only taxonomy in taxons_to_keep or with GCA in gca_to_keep
out_metadata['taxonomy'] = prune_taxonomy(in_metadata['taxonomy'], taxons_to_keep, gca_to_keep)
# save to JSON
dump_to_json(out_metadata, out_json_fp)
if __name__ == '__main__':
# Read command line
parser = argparse.ArgumentParser(description='Customize MetaPhlan database')
subparsers = parser.add_subparsers(dest='function')
# transform_pkl_to_json subcommand
pkl_to_json_parser = subparsers.add_parser('transform_pkl_to_json', help='Transform Pickle to JSON to get marker metadata')
pkl_to_json_parser.add_argument('--pkl', help="Path to input Pickle file")
pkl_to_json_parser.add_argument('--json', help="Path to output JSON file")
# transform_json_to_pkl subcommand
json_to_pkl_parser = subparsers.add_parser('transform_json_to_pkl', help='Transform JSON to Pickle to push marker metadata')
json_to_pkl_parser.add_argument('--json', help="Path to input JSON file")
json_to_pkl_parser.add_argument('--pkl', help="Path to output Pickle file")
# add_marker subcommand
add_marker_parser = subparsers.add_parser('add_marker', help='Add new marker to JSON file')
add_marker_parser.add_argument('--in_json', help="Path to input JSON file")
add_marker_parser.add_argument('--out_json', help="Path to output JSON file")
add_marker_parser.add_argument('--name', help="Name of new marker")
add_marker_parser.add_argument('--m_length', help="Length of new marker")
add_marker_parser.add_argument('--g_length', help="Length of genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--gca', help="GCA of genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--k_name', help="Name of Kingdom for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--k_id', help="NCBI id of Kingdom for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--p_name', help="Name of Phylum for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--p_id', help="NCBI id of Phylum for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--c_name', help="Name of Class for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--c_id', help="NCBI id of Class for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--o_name', help="Name of Order for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--o_id', help="NCBI id of Order for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--f_name', help="Name of Family for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--f_id', help="NCBI id of Family for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--g_name', help="Name of Genus for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--g_id', help="NCBI id of Genus for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--s_name', help="Name of Species for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--s_id', help="NCBI id of Species for genome from which the new marker has been extracted", action="append")
add_marker_parser.add_argument('--t_name', help="Name of Strain for genome from which the new marker has been extracted", action="append")
# remove_markers subcommand
remove_markers_parser = subparsers.add_parser('remove_markers', help='Remove markers from JSON file')
remove_markers_parser.add_argument('--in_json', help="Path to input JSON file")
remove_markers_parser.add_argument('--markers', help="Path to file with markers to remove (1 per line)")
remove_markers_parser.add_argument('--out_json', help="Path to output JSON file")
remove_markers_parser.add_argument('--kept_markers', help="Path to file with kept markers")
# keep_markers subcommand
keep_markers_parser = subparsers.add_parser('keep_markers', help='Keep markers from JSON file, others will be removed')
keep_markers_parser.add_argument('--in_json', help="Path to input JSON file")
keep_markers_parser.add_argument('--markers', help="Path to file with markers to keep (1 per line)")
keep_markers_parser.add_argument('--out_json', help="Path to output JSON file")
args = parser.parse_args()
if args.function == 'transform_pkl_to_json':
transform_pkl_to_json(Path(args.pkl), Path(args.json))
elif args.function == 'transform_json_to_pkl':
transform_json_to_pkl(Path(args.json), Path(args.pkl))
elif args.function == 'add_marker':
add_marker(
args.in_json,
args.out_json,
args.name,
args.m_length,
args.g_length,
args.gca,
args.k_name,
args.k_id,
args.p_name,
args.p_id,
args.c_name,
args.c_id,
args.o_name,
args.o_id,
args.f_name,
args.f_id,
args.g_name,
args.g_id,
args.s_name,
args.s_id,
args.t_name)
elif args.function == 'remove_markers':
remove_markers(args.in_json, args.markers, args.out_json, args.kept_markers)
elif args.function == 'keep_markers':
keep_markers(args.in_json, args.markers, args.out_json)
| mit | -2,116,030,210,486,993,200 | 38.477083 | 177 | 0.621721 | false |
houssemFat/bloodOn | bloodon/templatetags/bloodi_extras.py | 1 | 1190 | from string import lower, split
from django import template
#tr_msg_dic
#from django.utils import simplejson
register = template.Library()
@register.filter
def get_tr_value(dictionary, arg):
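    """Template filter: return dictionary[str(arg)], or '' when arg is falsy."""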
#return dictionary
if arg:
value_ = str(arg)
try :
return dictionary[value_] #arg#simplejson.dumps(value)
except ValueError:
return ''
else :
return ''
@register.filter
def get_form_error(dictionary, arg):
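    """Template filter: split the lower-cased arg on '#', look both parts up in
    dictionary and return the two values joined by a space."""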
if arg :
try :
string = split (lower (str(arg)), '#')
error_key = string[0]
error_field = dictionary [error_key]
error_code = dictionary [string[1]]
return ' '.join ([ error_code, error_field])
except ValueError:
return ''
else :
return ''
@register.filter
def format_phone(number):
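    """Render a number with the '## ### ###' pattern, e.g. '12345678' -> '12 345 678'."""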
format_ = '## ### ###'
length = len (number)
format_length = len (format_)
formatted = ''
i = 0
j = 0
while (j < length and i < format_length):
if (format_[i] == '#'):
formatted += number[j]
j +=1
else:
formatted += ' '
i += 1
return formatted
| mit | -6,872,206,881,221,684,000 | 22.8 | 66 | 0.536975 | false |
MartinHjelmare/home-assistant | homeassistant/components/ads/sensor.py | 7 | 2335 | """Support for ADS sensors."""
import logging
import voluptuous as vol
from homeassistant.components import ads
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT
import homeassistant.helpers.config_validation as cv
from . import CONF_ADS_FACTOR, CONF_ADS_TYPE, CONF_ADS_VAR, \
AdsEntity, STATE_KEY_STATE
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "ADS sensor"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ADS_VAR): cv.string,
vol.Optional(CONF_ADS_FACTOR): cv.positive_int,
vol.Optional(CONF_ADS_TYPE, default=ads.ADSTYPE_INT):
vol.In([ads.ADSTYPE_INT, ads.ADSTYPE_UINT, ads.ADSTYPE_BYTE,
ads.ADSTYPE_DINT, ads.ADSTYPE_UDINT]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=''): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up an ADS sensor device."""
ads_hub = hass.data.get(ads.DATA_ADS)
ads_var = config.get(CONF_ADS_VAR)
ads_type = config.get(CONF_ADS_TYPE)
name = config.get(CONF_NAME)
unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT)
factor = config.get(CONF_ADS_FACTOR)
entity = AdsSensor(
ads_hub, ads_var, ads_type, name, unit_of_measurement, factor)
add_entities([entity])
class AdsSensor(AdsEntity):
"""Representation of an ADS sensor entity."""
def __init__(self, ads_hub, ads_var, ads_type, name, unit_of_measurement,
factor):
"""Initialize AdsSensor entity."""
super().__init__(ads_hub, name, ads_var)
self._unit_of_measurement = unit_of_measurement
self._ads_type = ads_type
self._factor = factor
async def async_added_to_hass(self):
"""Register device notification."""
await self.async_initialize_device(
self._ads_var,
self._ads_hub.ADS_TYPEMAP[self._ads_type],
STATE_KEY_STATE,
self._factor)
@property
def state(self):
"""Return the state of the device."""
return self._state_dict[STATE_KEY_STATE]
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
| apache-2.0 | -2,115,013,889,005,415,000 | 31.887324 | 77 | 0.659957 | false |
boreq/archive_chan | archive_chan/management/commands/archive_chan_recount_denormalized.py | 1 | 1224 | from django.db.models import Max, Min, Count
from django.core.management.base import BaseCommand
from archive_chan.models import Thread
class Command(BaseCommand):
args = ''
help = 'Recount the data in the thread model.'
def handle(self, *args, **options):
threads = Thread.objects.annotate(
correct_first_reply=Min('post__time'),
correct_last_reply=Max('post__time'),
correct_replies=Count('post'),
correct_images=Count('post__image')
)
total = 0
updated = 0
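        # compare the stored counters with the freshly computed ones and update only the rows that drifted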
for thread in threads:
if (thread.correct_first_reply != thread.first_reply
or thread.correct_last_reply != thread.last_reply
or thread.correct_replies != thread.replies
or thread.correct_images != thread.images):
thread.first_reply = thread.correct_first_reply
thread.last_reply = thread.correct_last_reply
thread.replies = thread.correct_replies
thread.images = thread.correct_images
thread.save()
updated += 1
total += 1
print('Total: %s Updated: %s' % (total, updated))
| gpl-2.0 | 8,310,147,761,891,992,000 | 31.210526 | 65 | 0.576797 | false |
dharryman/BPM_Test_Framework | Gate_Source/Rigol3030DSG_GateSource_test.py | 1 | 3888 | import unittest
from mock import patch
import Gate_Source
import telnetlib
# Checks the simple get requests against the criteria set here
output = "1"
period = "3uS"
dutycycle = "0.03uS"
def mocked_rigol_replies(input):
global output, period, dutycycle
if input == "MOD:STAT?":
return output
elif input == "PULM:PER?":
return period
elif input == "PULM:WIDT?":
return dutycycle
elif input == "*IDN?":
return "Rigol Technologies,DSG3030"
def mocked_rigol_writes(input):
global output, period, dutycycle
if input == "PULM:OUT:STAT OFF":
output = "0"
elif input == "PULM:OUT:STAT ON":
output = "1"
# for the set tests to be implemented, a regex or something similar will go here to scan
# the input string. This will then be used to set the globals listed above. Then they
# can be read back using the 'mocked_rigol_replies' function.
class ExpectedDataTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Stuff you only run once
super(ExpectedDataTest, cls).setUpClass()
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_write")
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_query", side_effect=mocked_rigol_replies)
@patch("telnetlib.Telnet")
def setUp(self, mock_telnet, mock_telnet_query, mock_telnet_write):
# Stuff you run before each test
self.GS_test_inst = Gate_Source.Rigol3030DSG_GateSource("0", 0, 0)
unittest.TestCase.setUp(self)
def tearDown(self):
# Stuff you want to run after each test
pass
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_write")
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_read")
def test_set_pulse_dutycycle_with_invalid_input(self, mock_telnet_read, mock_telnet_write):
self.assertRaises(ValueError, self.GS_test_inst.set_pulse_dutycycle, -0.1)
self.assertRaises(ValueError, self.GS_test_inst.set_pulse_dutycycle, 1.1)
self.assertRaises(TypeError, self.GS_test_inst.set_pulse_dutycycle, "0.5")
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_write")
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_read")
def test_set_pulse_period_with_invalid_input(self, mock_telnet_read, mock_telnet_write):
self.assertRaises(ValueError, self.GS_test_inst.set_pulse_period, -0.1)
self.assertRaises(TypeError, self.GS_test_inst.set_pulse_period, "1.1")
#######################################################
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_write", side_effect=mocked_rigol_writes)
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_query", side_effect=mocked_rigol_replies)
def test_modulation_state_when_output_state_is_changed(self, mock_query, mock_write):
self.assertEqual(self.GS_test_inst.turn_on_modulation(), True)
self.assertEqual(self.GS_test_inst.get_modulation_state(), True)
self.assertEqual(self.GS_test_inst.turn_off_modulation(), False)
self.assertEqual(self.GS_test_inst.get_modulation_state(), False)
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_write", side_effect=mocked_rigol_writes)
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_query", side_effect=mocked_rigol_replies)
def test_get_dutycycle_return_values_if_expected_input_types_used(self, mock_query, mock_write):
self.assertEqual(self.GS_test_inst.get_pulse_dutycycle(), (0.01))
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_write", side_effect=mocked_rigol_writes)
@patch("Gate_Source.Rigol3030DSG_GateSource._telnet_query", side_effect=mocked_rigol_replies)
def test_get_pulse_period_return_values_if_expected_input_types_used(self, mock_query, mock_write):
self.assertEqual(self.GS_test_inst.get_pulse_period(), (3,"3uS"))
if __name__ == "__main__":
unittest.main() | apache-2.0 | 6,348,148,879,351,824,000 | 42.211111 | 103 | 0.692387 | false |
RevelSystems/django | tests/i18n/patterns/tests.py | 12 | 14314 | from __future__ import unicode_literals
import os
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import clear_url_caches, reverse, translate_url
from django.http import HttpResponsePermanentRedirect
from django.middleware.locale import LocaleMiddleware
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.utils import override_script_prefix
from django.utils import translation
from django.utils._os import upath
class PermanentRedirectLocaleMiddleWare(LocaleMiddleware):
response_redirect_class = HttpResponsePermanentRedirect
@override_settings(
USE_I18N=True,
LOCALE_PATHS=[
os.path.join(os.path.dirname(upath(__file__)), 'locale'),
],
LANGUAGE_CODE='en-us',
LANGUAGES=[
('nl', 'Dutch'),
('en', 'English'),
('pt-br', 'Brazilian Portuguese'),
],
MIDDLEWARE_CLASSES=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.patterns.urls.default',
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.i18n',
],
},
}],
)
class URLTestCaseBase(TestCase):
"""
TestCase base-class for the URL tests.
"""
def setUp(self):
# Make sure the cache is empty before we are doing our tests.
clear_url_caches()
def tearDown(self):
# Make sure we will leave an empty cache for other testcases.
clear_url_caches()
class URLPrefixTests(URLTestCaseBase):
"""
Tests if the `i18n_patterns` is adding the prefix correctly.
"""
def test_not_prefixed(self):
with translation.override('en'):
self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
with translation.override('nl'):
self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
def test_prefixed(self):
with translation.override('en'):
self.assertEqual(reverse('prefixed'), '/en/prefixed/')
with translation.override('nl'):
self.assertEqual(reverse('prefixed'), '/nl/prefixed/')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.wrong')
def test_invalid_prefix_use(self):
self.assertRaises(ImproperlyConfigured, lambda: reverse('account:register'))
@override_settings(ROOT_URLCONF='i18n.patterns.urls.disabled')
class URLDisabledTests(URLTestCaseBase):
@override_settings(USE_I18N=False)
def test_prefixed_i18n_disabled(self):
with translation.override('en'):
self.assertEqual(reverse('prefixed'), '/prefixed/')
with translation.override('nl'):
self.assertEqual(reverse('prefixed'), '/prefixed/')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.path_unused')
class PathUnusedTests(URLTestCaseBase):
"""
Check that if no i18n_patterns is used in root urlconfs, then no
language activation happens based on url prefix.
"""
def test_no_lang_activate(self):
response = self.client.get('/nl/foo/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
class URLTranslationTests(URLTestCaseBase):
"""
Tests if the pattern-strings are translated correctly (within the
`i18n_patterns` and the normal `patterns` function).
"""
def test_no_prefix_translated(self):
with translation.override('en'):
self.assertEqual(reverse('no-prefix-translated'), '/translated/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/translated/yeah/')
with translation.override('nl'):
self.assertEqual(reverse('no-prefix-translated'), '/vertaald/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/vertaald/yeah/')
with translation.override('pt-br'):
self.assertEqual(reverse('no-prefix-translated'), '/traduzidos/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/traduzidos/yeah/')
def test_users_url(self):
with translation.override('en'):
self.assertEqual(reverse('users'), '/en/users/')
with translation.override('nl'):
self.assertEqual(reverse('users'), '/nl/gebruikers/')
self.assertEqual(reverse('prefixed_xml'), '/nl/prefixed.xml')
with translation.override('pt-br'):
self.assertEqual(reverse('users'), '/pt-br/usuarios/')
def test_translate_url_utility(self):
with translation.override('en'):
self.assertEqual(translate_url('/en/non-existent/', 'nl'), '/en/non-existent/')
self.assertEqual(translate_url('/en/users/', 'nl'), '/nl/gebruikers/')
# Namespaced URL
self.assertEqual(translate_url('/en/account/register/', 'nl'), '/nl/profiel/registeren/')
self.assertEqual(translation.get_language(), 'en')
with translation.override('nl'):
self.assertEqual(translate_url('/nl/gebruikers/', 'en'), '/en/users/')
self.assertEqual(translation.get_language(), 'nl')
class URLNamespaceTests(URLTestCaseBase):
"""
Tests if the translations are still working within namespaces.
"""
def test_account_register(self):
with translation.override('en'):
self.assertEqual(reverse('account:register'), '/en/account/register/')
with translation.override('nl'):
self.assertEqual(reverse('account:register'), '/nl/profiel/registeren/')
class URLRedirectTests(URLTestCaseBase):
"""
Tests if the user gets redirected to the right URL when there is no
language-prefix in the request URL.
"""
def test_no_prefix_response(self):
response = self.client.get('/not-prefixed/')
self.assertEqual(response.status_code, 200)
def test_en_redirect(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_en_redirect_wrong_url(self):
response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='en')
self.assertEqual(response.status_code, 404)
def test_nl_redirect(self):
response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='nl')
self.assertRedirects(response, '/nl/profiel/registeren/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_nl_redirect_wrong_url(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='nl')
self.assertEqual(response.status_code, 404)
def test_pt_br_redirect(self):
response = self.client.get('/conta/registre-se/', HTTP_ACCEPT_LANGUAGE='pt-br')
self.assertRedirects(response, '/pt-br/conta/registre-se/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_pl_pl_redirect(self):
# language from outside of the supported LANGUAGES list
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='pl-pl')
self.assertRedirects(response, '/en/account/register/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
@override_settings(
MIDDLEWARE_CLASSES=[
'i18n.patterns.tests.PermanentRedirectLocaleMiddleWare',
'django.middleware.common.CommonMiddleware',
],
)
def test_custom_redirect_class(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/', 301)
class URLVaryAcceptLanguageTests(URLTestCaseBase):
"""
Tests that 'Accept-Language' is not added to the Vary header when using
prefixed URLs.
"""
def test_no_prefix_response(self):
response = self.client.get('/not-prefixed/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Vary'), 'Accept-Language')
def test_en_redirect(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/')
self.assertFalse(response.get('Vary'))
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
self.assertFalse(response.get('Vary'))
class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase):
"""
Tests the redirect when the requested URL doesn't end with a slash
(`settings.APPEND_SLASH=True`).
"""
def test_not_prefixed_redirect(self):
response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/not-prefixed/', 301)
def test_en_redirect(self):
response = self.client.get('/account/register', HTTP_ACCEPT_LANGUAGE='en', follow=True)
# We only want one redirect, bypassing CommonMiddleware
self.assertListEqual(response.redirect_chain, [('/en/account/register/', 302)])
self.assertRedirects(response, '/en/account/register/', 302)
response = self.client.get('/prefixed.xml', HTTP_ACCEPT_LANGUAGE='en', follow=True)
self.assertRedirects(response, '/en/prefixed.xml', 302)
class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase):
"""
Tests the redirect when the requested URL doesn't end with a slash
(`settings.APPEND_SLASH=False`).
"""
@override_settings(APPEND_SLASH=False)
def test_not_prefixed_redirect(self):
response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=False)
def test_en_redirect(self):
response = self.client.get('/account/register-without-slash', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register-without-slash', 302)
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
class URLResponseTests(URLTestCaseBase):
"""
Tests if the response has the right language-code.
"""
def test_not_prefixed_with_prefix(self):
response = self.client.get('/en/not-prefixed/')
self.assertEqual(response.status_code, 404)
def test_en_url(self):
response = self.client.get('/en/account/register/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
def test_nl_url(self):
response = self.client.get('/nl/profiel/registeren/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'nl')
self.assertEqual(response.context['LANGUAGE_CODE'], 'nl')
def test_wrong_en_prefix(self):
response = self.client.get('/en/profiel/registeren/')
self.assertEqual(response.status_code, 404)
def test_wrong_nl_prefix(self):
response = self.client.get('/nl/account/register/')
self.assertEqual(response.status_code, 404)
def test_pt_br_url(self):
response = self.client.get('/pt-br/conta/registre-se/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'pt-br')
self.assertEqual(response.context['LANGUAGE_CODE'], 'pt-br')
class URLRedirectWithScriptAliasTests(URLTestCaseBase):
"""
#21579 - LocaleMiddleware should respect the script prefix.
"""
def test_language_prefix_with_script_prefix(self):
prefix = '/script_prefix'
with override_script_prefix(prefix):
response = self.client.get('/prefixed/', HTTP_ACCEPT_LANGUAGE='en', SCRIPT_NAME=prefix)
self.assertRedirects(response, '%s/en/prefixed/' % prefix, target_status_code=404)
class URLTagTests(URLTestCaseBase):
"""
Test if the language tag works.
"""
def test_strings_only(self):
t = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
self.assertEqual(t.render(Context({})).strip().split(),
['/vertaald/', '/traduzidos/'])
def test_context(self):
ctx = Context({'lang1': 'nl', 'lang2': 'pt-br'})
tpl = Template("""{% load i18n %}
{% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %}
{% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
self.assertEqual(tpl.render(ctx).strip().split(),
['/vertaald/', '/traduzidos/'])
def test_args(self):
tpl = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}""")
self.assertEqual(tpl.render(Context({})).strip().split(),
['/vertaald/apo/', '/traduzidos/apo/'])
def test_kwargs(self):
tpl = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}""")
self.assertEqual(tpl.render(Context({})).strip().split(),
['/vertaald/apo/', '/traduzidos/apo/'])
| bsd-3-clause | -6,864,494,333,051,821,000 | 39.435028 | 112 | 0.647268 | false |
t794104/ansible | lib/ansible/modules/cloud/docker/docker_compose.py | 11 | 39515 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: docker_compose
short_description: Manage docker services and containers.
version_added: "2.1"
author: "Chris Houseknecht (@chouseknecht)"
description:
- Consumes docker compose to start, shutdown and scale services.
- Works with compose versions 1 and 2.
- Compose can be read from a docker-compose.yml (or .yaml) file or inline using the C(definition) option.
- See the examples for more details.
- Supports check mode.
- This module was called C(docker_service) before Ansible 2.8. The usage did not change.
options:
project_src:
description:
- Path to a directory containing a docker-compose.yml or docker-compose.yaml file.
- Mutually exclusive with C(definition).
- Required when no C(definition) is provided.
type: path
project_name:
description:
- Provide a project name. If not provided, the project name is taken from the basename of C(project_src).
- Required when C(definition) is provided.
type: str
files:
description:
- List of file names relative to C(project_src). Overrides docker-compose.yml or docker-compose.yaml.
- Files are loaded and merged in the order given.
type: list
state:
description:
- Desired state of the project.
- Specifying I(present) is the same as running I(docker-compose up).
- Specifying I(absent) is the same as running I(docker-compose down).
type: str
default: present
choices:
- absent
- present
services:
description:
- When C(state) is I(present) run I(docker-compose up) on a subset of services.
type: list
scale:
description:
- When C(state) is I(present) scale services. Provide a dictionary of key/value pairs where the key
is the name of the service and the value is an integer count for the number of containers.
type: dict
dependencies:
description:
- When C(state) is I(present) specify whether or not to include linked services.
type: bool
default: yes
definition:
description:
- Provide docker-compose yaml describing one or more services, networks and volumes.
- Mutually exclusive with C(project_src) and C(files).
type: dict
hostname_check:
description:
- Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
type: bool
default: no
recreate:
description:
- By default containers will be recreated when their configuration differs from the service definition.
- Setting to I(never) ignores configuration differences and leaves existing containers unchanged.
- Setting to I(always) forces recreation of all existing containers.
type: str
default: smart
choices:
- always
- never
- smart
build:
description:
- Use with state I(present) to always build images prior to starting the application.
- Same as running docker-compose build with the pull option.
- Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
- Use the C(nocache) option to ignore the image cache when performing the build.
- If an existing image is replaced, services using the image will be recreated unless C(recreate) is I(never).
type: bool
default: no
pull:
description:
- Use with state I(present) to always pull images prior to starting the application.
- Same as running docker-compose pull.
- When a new image is pulled, services using the image will be recreated unless C(recreate) is I(never).
type: bool
default: no
version_added: "2.2"
nocache:
description:
- Use with the build option to ignore the cache during the image build process.
type: bool
default: no
version_added: "2.2"
remove_images:
description:
      - Use with state I(absent) to remove all images or only local images.
type: str
choices:
- 'all'
- 'local'
remove_volumes:
description:
- Use with state I(absent) to remove data volumes.
type: bool
default: no
stopped:
description:
- Use with state I(present) to leave the containers in an exited or non-running state.
type: bool
default: no
restarted:
description:
- Use with state I(present) to restart all containers.
type: bool
default: no
remove_orphans:
description:
- Remove containers for services not defined in the compose file.
type: bool
default: no
timeout:
description:
      - Timeout in seconds for container shutdown when attached or when containers are already running.
type: int
default: 10
extends_documentation_fragment:
- docker
- docker.docker_py_1_documentation
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "docker-compose >= 1.7.0"
- "Docker API >= 1.20"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
# Examples use the django example at U(https://docs.docker.com/compose/django/). Follow it to create the flask
# directory
- name: Run using a project directory
hosts: localhost
gather_facts: no
tasks:
- docker_compose:
project_src: flask
state: absent
- docker_compose:
project_src: flask
register: output
- debug:
var: output
- docker_compose:
project_src: flask
build: no
register: output
- debug:
var: output
- assert:
that: "not output.changed "
- docker_compose:
project_src: flask
build: no
stopped: true
register: output
- debug:
var: output
- assert:
that:
- "not web.flask_web_1.state.running"
- "not db.flask_db_1.state.running"
- docker_compose:
project_src: flask
build: no
restarted: true
register: output
- debug:
var: output
- assert:
that:
- "web.flask_web_1.state.running"
- "db.flask_db_1.state.running"
- name: Scale the web service to 2
hosts: localhost
gather_facts: no
tasks:
- docker_compose:
project_src: flask
scale:
web: 2
register: output
- debug:
var: output
- name: Run with inline v2 compose
hosts: localhost
gather_facts: no
tasks:
- docker_compose:
project_src: flask
state: absent
- docker_compose:
project_name: flask
definition:
version: '2'
services:
db:
image: postgres
web:
build: "{{ playbook_dir }}/flask"
command: "python manage.py runserver 0.0.0.0:8000"
volumes:
- "{{ playbook_dir }}/flask:/code"
ports:
- "8000:8000"
depends_on:
- db
register: output
- debug:
var: output
- assert:
that:
- "web.flask_web_1.state.running"
- "db.flask_db_1.state.running"
- name: Run with inline v1 compose
hosts: localhost
gather_facts: no
tasks:
- docker_compose:
project_src: flask
state: absent
- docker_compose:
project_name: flask
definition:
db:
image: postgres
web:
build: "{{ playbook_dir }}/flask"
command: "python manage.py runserver 0.0.0.0:8000"
volumes:
- "{{ playbook_dir }}/flask:/code"
ports:
- "8000:8000"
links:
- db
register: output
- debug:
var: output
- assert:
that:
- "web.flask_web_1.state.running"
- "db.flask_db_1.state.running"
'''
RETURN = '''
services:
description:
- A dictionary mapping the service's name to a dictionary of containers.
- Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
are also accessible directly. The service's name is the variable with which the container dictionary
can be accessed. Note that the returned facts will be removed in Ansible 2.12.
returned: success
type: complex
contains:
container_name:
description: Name of the container. Format is I(project_service_#).
returned: success
type: complex
contains:
cmd:
description: One or more commands to be executed in the container.
returned: success
type: list
example: ["postgres"]
image:
description: Name of the image from which the container was built.
returned: success
type: str
example: postgres
labels:
description: Meta data assigned to the container.
returned: success
type: complex
example: {...}
networks:
description: Contains a dictionary for each network to which the container is a member.
returned: success
type: complex
contains:
IPAddress:
description: The IP address assigned to the container.
returned: success
type: str
example: 172.17.0.2
IPPrefixLen:
description: Number of bits used by the subnet.
returned: success
type: int
example: 16
aliases:
description: Aliases assigned to the container by the network.
returned: success
type: list
example: ['db']
globalIPv6:
description: IPv6 address assigned to the container.
returned: success
type: str
example: ''
globalIPv6PrefixLen:
description: IPv6 subnet length.
returned: success
type: int
example: 0
links:
description: List of container names to which this container is linked.
returned: success
type: list
example: null
macAddress:
description: Mac Address assigned to the virtual NIC.
returned: success
type: str
example: "02:42:ac:11:00:02"
state:
description: Information regarding the current disposition of the container.
returned: success
type: complex
contains:
running:
description: Whether or not the container is up with a running process.
returned: success
type: bool
example: true
status:
description: Description of the running state.
returned: success
type: str
example: running
actions:
description: Provides the actions to be taken on each service as determined by compose.
returned: when in check mode or I(debug) true
type: complex
contains:
service_name:
description: Name of the service.
returned: always
type: complex
contains:
pulled_image:
description: Provides image details when a new image is pulled for the service.
returned: on image pull
type: complex
contains:
name:
description: name of the image
returned: always
type: str
id:
description: image hash
returned: always
type: str
built_image:
description: Provides image details when a new image is built for the service.
returned: on image build
type: complex
contains:
name:
description: name of the image
returned: always
type: str
id:
description: image hash
returned: always
type: str
action:
description: A descriptive name of the action to be performed on the service's containers.
returned: always
type: list
contains:
id:
description: the container's long ID
returned: always
type: str
name:
description: the container's name
returned: always
type: str
short_id:
description: the container's short ID
returned: always
type: str
'''
import os
import re
import sys
import tempfile
from contextlib import contextmanager
from distutils.version import LooseVersion
try:
import yaml
HAS_YAML = True
HAS_YAML_EXC = None
except ImportError as exc:
HAS_YAML = False
HAS_YAML_EXC = str(exc)
try:
from compose import __version__ as compose_version
from compose.cli.command import project_from_options
from compose.service import NoSuchImageError
from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
HAS_COMPOSE = True
HAS_COMPOSE_EXC = None
MINIMUM_COMPOSE_VERSION = '1.7.0'
except ImportError as exc:
HAS_COMPOSE = False
HAS_COMPOSE_EXC = str(exc)
DEFAULT_TIMEOUT = 10
from ansible.module_utils.docker.common import AnsibleDockerClient, DockerBaseClass
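# map the module's authentication parameters to the docker-compose command line options they correspond to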
AUTH_PARAM_MAPPING = {
u'docker_host': u'--host',
u'tls': u'--tls',
u'cacert_path': u'--tlscacert',
u'cert_path': u'--tlscert',
u'key_path': u'--tlskey',
u'tls_verify': u'--tlsverify'
}
@contextmanager
def stdout_redirector(path_name):
old_stdout = sys.stdout
fd = open(path_name, 'w')
sys.stdout = fd
try:
yield
finally:
sys.stdout = old_stdout
@contextmanager
def stderr_redirector(path_name):
old_fh = sys.stderr
fd = open(path_name, 'w')
sys.stderr = fd
try:
yield
finally:
sys.stderr = old_fh
def make_redirection_tempfiles():
dummy, out_redir_name = tempfile.mkstemp(prefix="ansible")
dummy, err_redir_name = tempfile.mkstemp(prefix="ansible")
return (out_redir_name, err_redir_name)
def cleanup_redirection_tempfiles(out_name, err_name):
for i in [out_name, err_name]:
os.remove(i)
def get_redirected_output(path_name):
output = []
with open(path_name, 'r') as fd:
for line in fd:
# strip terminal format/color chars
new_line = re.sub(r'\x1b\[.+m', '', line)
output.append(new_line)
os.remove(path_name)
return output
def attempt_extract_errors(exc_str, stdout, stderr):
errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')]
errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')])
warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')]
warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')])
    # assume either the exception body (if present) or the last error was the most
    # fatal.
if exc_str.strip():
msg = exc_str.strip()
elif errors:
msg = errors[-1].encode('utf-8')
else:
msg = 'unknown cause'
return {
'warnings': [w.encode('utf-8') for w in warnings],
'errors': [e.encode('utf-8') for e in errors],
'msg': msg,
'module_stderr': ''.join(stderr),
'module_stdout': ''.join(stdout)
}
def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
if err_name is None:
stderr = []
else:
stderr = get_redirected_output(err_name)
stdout = get_redirected_output(out_name)
reason = attempt_extract_errors(str(exc), stdout, stderr)
reason['msg'] = msg_format % reason['msg']
return reason
class ContainerManager(DockerBaseClass):
def __init__(self, client):
super(ContainerManager, self).__init__()
self.client = client
self.project_src = None
self.files = None
self.project_name = None
self.state = None
self.definition = None
self.hostname_check = None
self.timeout = None
self.remove_images = None
self.remove_orphans = None
self.remove_volumes = None
self.stopped = None
self.restarted = None
self.recreate = None
self.build = None
self.dependencies = None
self.services = None
self.scale = None
self.debug = None
self.pull = None
self.nocache = None
for key, value in client.module.params.items():
setattr(self, key, value)
self.check_mode = client.check_mode
if not self.debug:
self.debug = client.module._debug
self.options = dict()
self.options.update(self._get_auth_options())
self.options[u'--skip-hostname-check'] = (not self.hostname_check)
if self.project_name:
self.options[u'--project-name'] = self.project_name
if self.files:
self.options[u'--file'] = self.files
if not HAS_COMPOSE:
self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
HAS_COMPOSE_EXC)
if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
"Upgrade docker-compose to a min version of %s." %
(compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
self.log("options: ")
self.log(self.options, pretty_print=True)
if self.definition:
if not HAS_YAML:
self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC)
if not self.project_name:
self.client.fail("Parameter error - project_name required when providing definition.")
self.project_src = tempfile.mkdtemp(prefix="ansible")
compose_file = os.path.join(self.project_src, "docker-compose.yml")
try:
self.log('writing: ')
self.log(yaml.dump(self.definition, default_flow_style=False))
with open(compose_file, 'w') as f:
f.write(yaml.dump(self.definition, default_flow_style=False))
except Exception as exc:
self.client.fail("Error writing to %s - %s" % (compose_file, str(exc)))
else:
if not self.project_src:
self.client.fail("Parameter error - project_src required.")
try:
self.log("project_src: %s" % self.project_src)
self.project = project_from_options(self.project_src, self.options)
except Exception as exc:
self.client.fail("Configuration error - %s" % str(exc))
def exec_module(self):
result = dict()
if self.state == 'present':
result = self.cmd_up()
elif self.state == 'absent':
result = self.cmd_down()
if self.definition:
compose_file = os.path.join(self.project_src, "docker-compose.yml")
self.log("removing %s" % compose_file)
os.remove(compose_file)
self.log("removing %s" % self.project_src)
os.rmdir(self.project_src)
if not self.check_mode and not self.debug and result.get('actions'):
result.pop('actions')
return result
def _get_auth_options(self):
options = dict()
for key, value in self.client.auth_params.items():
if value is not None:
option = AUTH_PARAM_MAPPING.get(key)
if option:
options[option] = value
return options
def cmd_up(self):
start_deps = self.dependencies
service_names = self.services
detached = True
result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict())
up_options = {
u'--no-recreate': False,
u'--build': False,
u'--no-build': False,
u'--no-deps': False,
u'--force-recreate': False,
}
if self.recreate == 'never':
up_options[u'--no-recreate'] = True
elif self.recreate == 'always':
up_options[u'--force-recreate'] = True
if self.remove_orphans:
up_options[u'--remove-orphans'] = True
converge = convergence_strategy_from_opts(up_options)
self.log("convergence strategy: %s" % converge)
if self.pull:
pull_output = self.cmd_pull()
result['changed'] = pull_output['changed']
result['actions'] += pull_output['actions']
if self.build:
build_output = self.cmd_build()
result['changed'] = build_output['changed']
result['actions'] += build_output['actions']
if self.remove_orphans:
containers = self.client.containers(
filters={
'label': [
'{0}={1}'.format(LABEL_PROJECT, self.project.name),
'{0}={1}'.format(LABEL_ONE_OFF, "False")
],
}
)
orphans = []
for container in containers:
service_name = container.get('Labels', {}).get(LABEL_SERVICE)
if service_name not in self.project.service_names:
orphans.append(service_name)
if orphans:
result['changed'] = True
for service in self.project.services:
if not service_names or service.name in service_names:
plan = service.convergence_plan(strategy=converge)
if plan.action != 'noop':
result['changed'] = True
result_action = dict(service=service.name)
result_action[plan.action] = []
for container in plan.containers:
result_action[plan.action].append(dict(
id=container.id,
name=container.name,
short_id=container.short_id,
))
result['actions'].append(result_action)
if not self.check_mode and result['changed']:
out_redir_name, err_redir_name = make_redirection_tempfiles()
try:
with stdout_redirector(out_redir_name):
with stderr_redirector(err_redir_name):
do_build = build_action_from_opts(up_options)
self.log('Setting do_build to %s' % do_build)
self.project.up(
service_names=service_names,
start_deps=start_deps,
strategy=converge,
do_build=do_build,
detached=detached,
remove_orphans=self.remove_orphans,
timeout=self.timeout)
except Exception as exc:
fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
msg_format="Error starting project %s")
self.client.fail(**fail_reason)
else:
cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
if self.stopped:
stop_output = self.cmd_stop(service_names)
result['changed'] = stop_output['changed']
result['actions'] += stop_output['actions']
if self.restarted:
restart_output = self.cmd_restart(service_names)
result['changed'] = restart_output['changed']
result['actions'] += restart_output['actions']
if self.scale:
scale_output = self.cmd_scale()
result['changed'] = scale_output['changed']
result['actions'] += scale_output['actions']
for service in self.project.services:
service_facts = dict()
result['ansible_facts'][service.name] = service_facts
result['services'][service.name] = service_facts
for container in service.containers(stopped=True):
inspection = container.inspect()
# pare down the inspection data to the most useful bits
facts = dict(
cmd=[],
labels=dict(),
image=None,
state=dict(
running=None,
status=None
),
networks=dict()
)
if inspection['Config'].get('Cmd', None) is not None:
facts['cmd'] = inspection['Config']['Cmd']
if inspection['Config'].get('Labels', None) is not None:
facts['labels'] = inspection['Config']['Labels']
if inspection['Config'].get('Image', None) is not None:
facts['image'] = inspection['Config']['Image']
if inspection['State'].get('Running', None) is not None:
facts['state']['running'] = inspection['State']['Running']
if inspection['State'].get('Status', None) is not None:
facts['state']['status'] = inspection['State']['Status']
if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
networks = inspection['NetworkSettings']['Networks']
for key in networks:
facts['networks'][key] = dict(
aliases=[],
globalIPv6=None,
globalIPv6PrefixLen=0,
IPAddress=None,
IPPrefixLen=0,
links=None,
macAddress=None,
)
if networks[key].get('Aliases', None) is not None:
facts['networks'][key]['aliases'] = networks[key]['Aliases']
if networks[key].get('GlobalIPv6Address', None) is not None:
facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
if networks[key].get('IPAddress', None) is not None:
facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
if networks[key].get('IPPrefixLen', None) is not None:
facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
if networks[key].get('Links', None) is not None:
facts['networks'][key]['links'] = networks[key]['Links']
if networks[key].get('MacAddress', None) is not None:
facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
service_facts[container.name] = facts
return result
def cmd_pull(self):
result = dict(
changed=False,
actions=[],
)
if not self.check_mode:
for service in self.project.get_services(self.services, include_deps=False):
if 'image' not in service.options:
continue
self.log('Pulling image for service %s' % service.name)
# store the existing image ID
old_image_id = ''
try:
image = service.image()
if image and image.get('Id'):
old_image_id = image['Id']
except NoSuchImageError:
pass
except Exception as exc:
self.client.fail("Error: service image lookup failed - %s" % str(exc))
# pull the image
try:
service.pull(ignore_pull_failures=False)
except Exception as exc:
self.client.fail("Error: pull failed with %s" % str(exc))
# store the new image ID
new_image_id = ''
try:
image = service.image()
if image and image.get('Id'):
new_image_id = image['Id']
except NoSuchImageError as exc:
self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
if new_image_id != old_image_id:
# if a new image was pulled
result['changed'] = True
result['actions'].append(dict(
service=service.name,
pulled_image=dict(
name=service.image_name,
id=new_image_id
)
))
return result
def cmd_build(self):
result = dict(
changed=False,
actions=[]
)
if not self.check_mode:
for service in self.project.get_services(self.services, include_deps=False):
if service.can_be_built():
self.log('Building image for service %s' % service.name)
# store the existing image ID
old_image_id = ''
try:
image = service.image()
if image and image.get('Id'):
old_image_id = image['Id']
except NoSuchImageError:
pass
except Exception as exc:
self.client.fail("Error: service image lookup failed - %s" % str(exc))
# build the image
try:
new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
except Exception as exc:
self.client.fail("Error: build failed with %s" % str(exc))
if new_image_id not in old_image_id:
# if a new image was built
result['changed'] = True
result['actions'].append(dict(
service=service.name,
built_image=dict(
name=service.image_name,
id=new_image_id
)
))
return result
def cmd_down(self):
result = dict(
changed=False,
actions=[]
)
for service in self.project.services:
containers = service.containers(stopped=True)
if len(containers):
result['changed'] = True
result['actions'].append(dict(
service=service.name,
deleted=[container.name for container in containers]
))
if not self.check_mode and result['changed']:
image_type = image_type_from_opt('--rmi', self.remove_images)
try:
self.project.down(image_type, self.remove_volumes, self.remove_orphans)
except Exception as exc:
self.client.fail("Error stopping project - %s" % str(exc))
return result
def cmd_stop(self, service_names):
result = dict(
changed=False,
actions=[]
)
for service in self.project.services:
if not service_names or service.name in service_names:
service_res = dict(
service=service.name,
stop=[]
)
for container in service.containers(stopped=False):
result['changed'] = True
service_res['stop'].append(dict(
id=container.id,
name=container.name,
short_id=container.short_id
))
result['actions'].append(service_res)
if not self.check_mode and result['changed']:
out_redir_name, err_redir_name = make_redirection_tempfiles()
try:
with stdout_redirector(out_redir_name):
with stderr_redirector(err_redir_name):
self.project.stop(service_names=service_names, timeout=self.timeout)
except Exception as exc:
fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
msg_format="Error stopping project %s")
self.client.fail(**fail_reason)
else:
cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
return result
def cmd_restart(self, service_names):
result = dict(
changed=False,
actions=[]
)
for service in self.project.services:
if not service_names or service.name in service_names:
service_res = dict(
service=service.name,
restart=[]
)
for container in service.containers(stopped=True):
result['changed'] = True
service_res['restart'].append(dict(
id=container.id,
name=container.name,
short_id=container.short_id
))
result['actions'].append(service_res)
if not self.check_mode and result['changed']:
out_redir_name, err_redir_name = make_redirection_tempfiles()
try:
with stdout_redirector(out_redir_name):
with stderr_redirector(err_redir_name):
self.project.restart(service_names=service_names, timeout=self.timeout)
except Exception as exc:
fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
msg_format="Error restarting project %s")
self.client.fail(**fail_reason)
else:
cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
return result
def cmd_scale(self):
result = dict(
changed=False,
actions=[]
)
for service in self.project.services:
if service.name in self.scale:
service_res = dict(
service=service.name,
scale=0
)
containers = service.containers(stopped=True)
scale = self.parse_scale(service.name)
if len(containers) != scale:
result['changed'] = True
service_res['scale'] = scale - len(containers)
if not self.check_mode:
try:
service.scale(scale)
except Exception as exc:
self.client.fail("Error scaling %s - %s" % (service.name, str(exc)))
result['actions'].append(service_res)
return result
def parse_scale(self, service_name):
try:
return int(self.scale[service_name])
except ValueError:
            self.client.fail("Error scaling %s - expected int, got %s" %
                             (service_name, str(type(self.scale[service_name]))))
def main():
argument_spec = dict(
project_src=dict(type='path'),
project_name=dict(type='str',),
files=dict(type='list', elements='path'),
state=dict(type='str', default='present', choices=['absent', 'present']),
definition=dict(type='dict'),
hostname_check=dict(type='bool', default=False),
recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
build=dict(type='bool', default=False),
remove_images=dict(type='str', choices=['all', 'local']),
remove_volumes=dict(type='bool', default=False),
remove_orphans=dict(type='bool', default=False),
stopped=dict(type='bool', default=False),
restarted=dict(type='bool', default=False),
scale=dict(type='dict'),
services=dict(type='list', elements='str'),
dependencies=dict(type='bool', default=True),
pull=dict(type='bool', default=False),
nocache=dict(type='bool', default=False),
debug=dict(type='bool', default=False),
timeout=dict(type='int', default=DEFAULT_TIMEOUT)
)
mutually_exclusive = [
('definition', 'project_src'),
('definition', 'files')
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
min_docker_api_version='1.20',
)
if client.module._name == 'docker_service':
client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.", version='2.12')
result = ContainerManager(client).exec_module()
client.module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -4,108,804,277,155,227,600 | 34.98816 | 153 | 0.529191 | false |
realityone/flaskbb | flaskbb/extensions.py | 1 | 1474 | # -*- coding: utf-8 -*-
"""
flaskbb.extensions
~~~~~~~~~~~~~~~~~~
The extensions that are used by FlaskBB.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from celery import Celery
from flask_allows import Allows
from flask_sqlalchemy import SQLAlchemy
from flask_whooshee import Whooshee
from flask_login import LoginManager
from flask_mail import Mail
from flask_caching import Cache
from flask_debugtoolbar import DebugToolbarExtension
from flask_redis import FlaskRedis
from flask_migrate import Migrate
from flask_themes2 import Themes
from flask_plugins import PluginManager
from flask_babelplus import Babel
from flask_wtf.csrf import CsrfProtect
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flaskbb.exceptions import AuthorizationRequired
# Permissions Manager
allows = Allows(throws=AuthorizationRequired)
# Database
db = SQLAlchemy()
# Whooshee (Full Text Search)
whooshee = Whooshee()
# Login
login_manager = LoginManager()
# Mail
mail = Mail()
# Caching
cache = Cache()
# Redis
redis_store = FlaskRedis()
# Debugtoolbar
debugtoolbar = DebugToolbarExtension()
# Migrations
migrate = Migrate()
# Themes
themes = Themes()
# PluginManager
plugin_manager = PluginManager()
# Babel
babel = Babel()
# CSRF
csrf = CsrfProtect()
# Rate Limiting
limiter = Limiter(auto_check=False, key_func=get_remote_address)
# Celery
celery = Celery("flaskbb")
| bsd-3-clause | 52,331,647,757,293,920 | 19.191781 | 64 | 0.75848 | false |
icedwater/opendoc | excel.py | 1 | 1452 | #! /usr/bin/env python
"""
Here's a small script based on a github gist to show diffs
between Excel files. I'll be using this to see if openpyxl
or xlrd is easier to use or they serve different use cases
entirely. Thanks, nmz787.
"""
import xlrd
import sys
def cell_on_line(excel_filename):
"""
    Take an input file and return each cell on its own line.
"""
excel_file = xlrd.open_workbook(excel_filename)
if not excel_file:
raise Exception('The file provided was not an Excel file.')
worksheets = excel_file.sheet_names()
for sheet in worksheets:
current_sheet = excel_file.sheet_by_name(sheet)
cells = []
for row in xrange(current_sheet.nrows):
current_row = current_sheet.row(row)
if not current_row:
continue
row_as_string = "[{}: ({},".format(sheet, row)
for cell in xrange(current_sheet.ncols):
s = str(current_sheet.cell_value(row, cell))
s = s.replace(r"\\", "\\\\")
s = s.replace(r"\n", " ")
s = s.replace(r"\r", " ")
s = s.replace(r"\t", " ")
if s:
cells.append('{} {})] {}\n'.format(row_as_string, cell, s))
if cells:
return ''.join(cells)
if __name__ == '__main__':
filename = 'sample.xlsx'
print("Opening %s..." % filename)
    print cell_on_line(filename)
| mit | 7,909,886,015,887,775,000 | 28.632653 | 79 | 0.550964 | false |
cjaymes/pyscap | src/scap/model/xccdf_1_1/MetadataType.py | 1 | 1100 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
logger = logging.getLogger(__name__)
class MetadataType(Model):
MODEL_MAP = {
'elements': [
# TODO at least 1 element must be used
{'xmlns': 'http://purl.org/dc/elements/1.1/', 'tag_name': '*', 'min': 0, 'max': None},
{'xmlns': 'http://checklists.nist.gov/sccf/0.1', 'tag_name': '*', 'min': 0, 'max': None},
],
}
| gpl-3.0 | 364,053,764,359,543,600 | 35.666667 | 101 | 0.675455 | false |
0--key/lib | portfolio/Python/scrapy/wesco/wesco_spider.py | 2 | 1480 | import csv
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class CBCDirectSpider(BaseSpider):
name = 'cbcdirect.com'
allowed_domains = ['cbcdirect.com']
def start_requests(self):
with open(os.path.join(HERE, 'products.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
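                # one search request per CSV row, searching by UPC and carrying the part number in meta as the sku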
url = 'http://www.cbcdirect.com/direct/Search/SearchResults.aspx?\
advSearch=1&searchFlag=2&partNoKeyword=simus&partNoKeywordValue=%s\
&searchAll=1&stringPos=1&yourCat=0&paperCatalog=1&gr=0&sortfield=simus&sortDirection=asc'
yield Request(url % row['UPC'], meta={'sku': row['Part #']})
def parse(self, response):
hxs = HtmlXPathSelector(response)
name = hxs.select('//span[@id="ProductDetail1_lblDescription"]//text()').extract()
if name:
loader = ProductLoader(item=Product(), response=response)
loader.add_value('name', name)
loader.add_value('url', response.url)
loader.add_xpath('price', '//*[@class="yourPriceText"]//text()')
loader.add_value('sku', response.meta['sku'])
yield loader.load_item()
| apache-2.0 | 2,747,492,294,643,822,000 | 36.948718 | 90 | 0.670946 | false |
gordonzola/naobot | irc/events.py | 2 | 4660 | # Numeric table based on the Perl's Net::IRC.
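# maps three-digit IRC numeric reply codes to symbolic event names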
numeric = {
"001": "welcome",
"002": "yourhost",
"003": "created",
"004": "myinfo",
"005": "featurelist", # XXX
"200": "tracelink",
"201": "traceconnecting",
"202": "tracehandshake",
"203": "traceunknown",
"204": "traceoperator",
"205": "traceuser",
"206": "traceserver",
"207": "traceservice",
"208": "tracenewtype",
"209": "traceclass",
"210": "tracereconnect",
"211": "statslinkinfo",
"212": "statscommands",
"213": "statscline",
"214": "statsnline",
"215": "statsiline",
"216": "statskline",
"217": "statsqline",
"218": "statsyline",
"219": "endofstats",
"221": "umodeis",
"231": "serviceinfo",
"232": "endofservices",
"233": "service",
"234": "servlist",
"235": "servlistend",
"241": "statslline",
"242": "statsuptime",
"243": "statsoline",
"244": "statshline",
"250": "luserconns",
"251": "luserclient",
"252": "luserop",
"253": "luserunknown",
"254": "luserchannels",
"255": "luserme",
"256": "adminme",
"257": "adminloc1",
"258": "adminloc2",
"259": "adminemail",
"261": "tracelog",
"262": "endoftrace",
"263": "tryagain",
"265": "n_local",
"266": "n_global",
"300": "none",
"301": "away",
"302": "userhost",
"303": "ison",
"305": "unaway",
"306": "nowaway",
"311": "whoisuser",
"312": "whoisserver",
"313": "whoisoperator",
"314": "whowasuser",
"315": "endofwho",
"316": "whoischanop",
"317": "whoisidle",
"318": "endofwhois",
"319": "whoischannels",
"321": "liststart",
"322": "list",
"323": "listend",
"324": "channelmodeis",
"329": "channelcreate",
"331": "notopic",
"332": "currenttopic",
"333": "topicinfo",
"341": "inviting",
"342": "summoning",
"346": "invitelist",
"347": "endofinvitelist",
"348": "exceptlist",
"349": "endofexceptlist",
"351": "version",
"352": "whoreply",
"353": "namreply",
"361": "killdone",
"362": "closing",
"363": "closeend",
"364": "links",
"365": "endoflinks",
"366": "endofnames",
"367": "banlist",
"368": "endofbanlist",
"369": "endofwhowas",
"371": "info",
"372": "motd",
"373": "infostart",
"374": "endofinfo",
"375": "motdstart",
"376": "endofmotd",
"377": "motd2", # 1997-10-16 -- tkil
"381": "youreoper",
"382": "rehashing",
"384": "myportis",
"391": "time",
"392": "usersstart",
"393": "users",
"394": "endofusers",
"395": "nousers",
"401": "nosuchnick",
"402": "nosuchserver",
"403": "nosuchchannel",
"404": "cannotsendtochan",
"405": "toomanychannels",
"406": "wasnosuchnick",
"407": "toomanytargets",
"409": "noorigin",
"411": "norecipient",
"412": "notexttosend",
"413": "notoplevel",
"414": "wildtoplevel",
"421": "unknowncommand",
"422": "nomotd",
"423": "noadmininfo",
"424": "fileerror",
"431": "nonicknamegiven",
"432": "erroneusnickname", # Thiss iz how its speld in thee RFC.
"433": "nicknameinuse",
"436": "nickcollision",
"437": "unavailresource", # "Nick temporally unavailable"
"441": "usernotinchannel",
"442": "notonchannel",
"443": "useronchannel",
"444": "nologin",
"445": "summondisabled",
"446": "usersdisabled",
"451": "notregistered",
"461": "needmoreparams",
"462": "alreadyregistered",
"463": "nopermforhost",
"464": "passwdmismatch",
"465": "yourebannedcreep", # I love this one...
"466": "youwillbebanned",
"467": "keyset",
"471": "channelisfull",
"472": "unknownmode",
"473": "inviteonlychan",
"474": "bannedfromchan",
"475": "badchannelkey",
"476": "badchanmask",
"477": "nochanmodes", # "Channel doesn't support modes"
"478": "banlistfull",
"481": "noprivileges",
"482": "chanoprivsneeded",
"483": "cantkillserver",
"484": "restricted", # Connection is restricted
"485": "uniqopprivsneeded",
"491": "nooperhost",
"492": "noservicehost",
"501": "umodeunknownflag",
"502": "usersdontmatch",
}
generated = [
"dcc_connect",
"dcc_disconnect",
"dccmsg",
"disconnect",
"ctcp",
"ctcpreply",
]
protocol = [
"error",
"join",
"kick",
"mode",
"part",
"ping",
"privmsg",
"privnotice",
"pubmsg",
"pubnotice",
"quit",
"invite",
"pong",
"action",
"topic",
"nick",
]
all = generated + protocol + numeric.values()
| agpl-3.0 | -3,814,749,747,236,175,400 | 23.397906 | 69 | 0.529399 | false |
cerrno/neurokernel | neurokernel/mixins.py | 2 | 1930 | #!/usr/bin/env python
"""
Object with built-in logger.
"""
import twiggy
class LoggerMixin(object):
"""
Mixin that provides a per-instance logger that can be turned off.
Parameters
----------
name : str
Name to assign logger.
log_on : bool
Initial value to assign to class instance's `log_on` property.
Attributes
----------
log_on : bool
If set to False, the logger's methods will silently
do nothing when called.
Methods
-------
log_debug(), log_info(), log_warning(), log_error(), log_critical()
Emit a log message at the level corresponding to the method name.
"""
def __init__(self, name, log_on=True):
super(LoggerMixin, self).__init__()
self.logger = twiggy.log.name(name)
self.log_on = log_on
@property
def log_on(self):
"""
Logger switch. If False, the logging methods silently do nothing.
"""
return self._log_on
@log_on.setter
def log_on(self, value):
self._log_on = bool(value)
if self._log_on:
self.log_debug = self.logger.debug
self.log_info = self.logger.info
self.log_warning = self.logger.warning
self.log_error = self.logger.error
self.log_critical = self.logger.critical
else:
self.log_debug = lambda x: None
self.log_info = lambda x: None
self.log_warning = lambda x: None
self.log_error = lambda x: None
self.log_critical = lambda x: None
if __name__ == '__main__':
import sys
output = twiggy.outputs.StreamOutput(twiggy.formats.line_format,
stream=sys.stdout)
twiggy.emitters['*'] = twiggy.filters.Emitter(twiggy.levels.DEBUG, True, output)
l = LoggerMixin('foo')
l.log_info('test')
l.log_on = False
l.log_info('test')
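# Illustrative sketch (not part of the upstream module): the more common
# pattern is to inherit from LoggerMixin so each instance gets its own named,
# switchable logger. The class and message below are hypothetical.
#
#     class Module(LoggerMixin):
#         def __init__(self, name):
#             LoggerMixin.__init__(self, name)
#             self.log_info('module %s initialized' % name)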
| bsd-3-clause | 3,982,794,631,024,196,000 | 26.571429 | 84 | 0.569948 | false |
virtualrobotix/ardupilot | libraries/AP_HAL_ChibiOS/hwdef/scripts/STM32F469xx.py | 21 | 21308 | #!/usr/bin/env python
'''
these tables are generated from the STM32 datasheet stm32f469ii.pdf
'''
# additional build information for ChibiOS
build = {
"CHIBIOS_STARTUP_MK" : "os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32f4xx.mk",
"CHIBIOS_PLATFORM_MK" : "os/hal/ports/STM32/STM32F4xx/platform.mk"
}
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16,
'F': 16,
'G': 16,
'H': 8,
'I': 12,
'J': 0,
'K': 0
}
# MCU parameters
mcu = {
# location of MCU serial number
'UDID_START' : 0x1FFF7A10,
# ram map, as list of (address, size-kb, flags)
# flags of 1 means DMA-capable
# flags of 2 means faster memory for CPU intensive work
'RAM_MAP' : [
(0x20000000, 320, 1), # main memory, DMA safe
(0x10000000, 64, 1), # CCM memory, faster, not DMA safe
]
}
DMA_Map = {
# format is (DMA_TABLE, StreamNum, Channel)
# extracted from tabula-STM32F4x7-dma.csv
"ADC1" : [(2,0,0),(2,4,0)],
"ADC2" : [(2,2,1),(2,3,1)],
"ADC3" : [(2,0,2),(2,1,2)],
"CRYP_IN" : [(2,6,2)],
"CRYP_OUT" : [(2,5,2)],
"DAC1" : [(1,5,7)],
"DAC2" : [(1,6,7)],
"DCMI" : [(2,1,1),(2,7,1)],
"HASH_IN" : [(2,7,2)],
"I2C1_RX" : [(1,0,1),(1,5,1)],
"I2C1_TX" : [(1,6,1),(1,7,1)],
"I2C2_RX" : [(1,2,7),(1,3,7)],
"I2C2_TX" : [(1,7,7)],
"I2C3_RX" : [(1,2,3)],
"I2C3_TX" : [(1,4,3)],
"I2S2_EXT_RX" : [(1,3,3)],
"I2S2_EXT_TX" : [(1,4,2)],
"I2S3_EXT_RX" : [(1,2,2),(1,0,3)],
"I2S3_EXT_TX" : [(1,5,2)],
"SAI1_A" : [(2,1,0),(2,3,0)],
"SAI1_B" : [(2,5,0),(2,4,1)],
"SDIO" : [(2,3,4),(2,6,4)],
"SPI1_RX" : [(2,0,3),(2,2,3)],
"SPI1_TX" : [(2,3,3),(2,5,3)],
"SPI2_RX" : [(1,3,0)],
"SPI2_TX" : [(1,4,0)],
"SPI3_RX" : [(1,0,0),(1,2,0)],
"SPI3_TX" : [(1,5,0),(1,7,0)],
"SPI4_RX" : [(2,0,4),(2,3,5)],
"SPI4_TX" : [(2,1,4),(2,4,5)],
"SPI5_RX" : [(2,3,2),(2,5,7)],
"SPI5_TX" : [(2,4,2),(2,6,7)],
"SPI6_RX" : [(2,6,1)],
"SPI6_TX" : [(2,5,1)],
"TIM1_CH1" : [(2,6,0),(2,1,6),(2,3,6)],
"TIM1_CH2" : [(2,6,0),(2,2,6)],
"TIM1_CH3" : [(2,6,0),(2,6,6)],
"TIM1_CH4" : [(2,4,6)],
"TIM1_COM" : [(2,4,6)],
"TIM1_TRIG" : [(2,0,6),(2,4,6)],
"TIM1_UP" : [(2,5,6)],
"TIM2_CH1" : [(1,5,3)],
"TIM2_CH2" : [(1,6,3)],
"TIM2_CH3" : [(1,1,3)],
"TIM2_CH4" : [(1,6,3),(1,7,3)],
"TIM2_UP" : [(1,1,3),(1,7,3)],
"TIM3_CH1" : [(1,4,5)],
"TIM3_CH2" : [(1,5,5)],
"TIM3_CH3" : [(1,7,5)],
"TIM3_CH4" : [(1,2,5)],
"TIM3_TRIG" : [(1,4,5)],
"TIM3_UP" : [(1,2,5)],
"TIM4_CH1" : [(1,0,2)],
"TIM4_CH2" : [(1,3,2)],
"TIM4_CH3" : [(1,7,2)],
"TIM4_UP" : [(1,6,2)],
"TIM5_CH1" : [(1,2,6)],
"TIM5_CH2" : [(1,4,6)],
"TIM5_CH3" : [(1,0,6)],
"TIM5_CH4" : [(1,1,6),(1,3,6)],
"TIM5_TRIG" : [(1,1,6),(1,3,6)],
"TIM5_UP" : [(1,0,6),(1,6,6)],
"TIM6_UP" : [(1,1,7)],
"TIM7_UP" : [(1,2,1),(1,4,1)],
"TIM8_CH1" : [(2,2,0),(2,2,7)],
"TIM8_CH2" : [(2,2,0),(2,3,7)],
"TIM8_CH3" : [(2,2,0),(2,4,7)],
"TIM8_CH4" : [(2,7,7)],
"TIM8_COM" : [(2,7,7)],
"TIM8_TRIG" : [(2,7,7)],
"TIM8_UP" : [(2,1,7)],
"UART4_RX" : [(1,2,4)],
"UART4_TX" : [(1,4,4)],
"UART5_RX" : [(1,0,4)],
"UART5_TX" : [(1,7,4)],
"UART7_RX" : [(1,3,5)],
"UART7_TX" : [(1,1,5)],
"UART8_RX" : [(1,6,5)],
"UART8_TX" : [(1,0,5)],
"USART1_RX" : [(2,2,4),(2,5,4)],
"USART1_TX" : [(2,7,4)],
"USART2_RX" : [(1,5,4)],
"USART2_TX" : [(1,6,4)],
"USART3_RX" : [(1,1,4)],
"USART3_TX" : [(1,3,4),(1,4,7)],
"USART6_RX" : [(2,1,5),(2,2,5)],
"USART6_TX" : [(2,6,5),(2,7,5)],
}
AltFunction_map = {
# format is PIN:FUNCTION : AFNUM
"PA0:ETH_MII_CRS" : 11,
"PA0:EVENTOUT" : 15,
"PA0:TIM2_CH1" : 1,
"PA0:TIM2_ETR" : 1,
"PA0:TIM5_CH1" : 2,
"PA0:TIM8_ETR" : 3,
"PA0:UART4_TX" : 8,
"PA0:USART2_CTS" : 7,
"PA10:DCMI_D1" : 13,
"PA10:EVENTOUT" : 15,
"PA10:OTG_FS_ID" : 10,
"PA10:TIM1_CH3" : 1,
"PA10:USART1_RX" : 7,
"PA11:CAN1_RX" : 9,
"PA11:EVENTOUT" : 15,
"PA11:LCD_R4" : 14,
"PA11:OTG_FS_DM" : 10,
"PA11:TIM1_CH4" : 1,
"PA11:USART1_CTS" : 7,
"PA12:CAN1_TX" : 9,
"PA12:EVENTOUT" : 15,
"PA12:LCD_R5" : 14,
"PA12:OTG_FS_DP" : 10,
"PA12:TIM1_ETR" : 1,
"PA12:USART1_RTS" : 7,
"PA13:EVENTOUT" : 15,
"PA13:JTMS-SWDIO" : 0,
"PA14:EVENTOUT" : 15,
"PA14:JTCK-SWCLK" : 0,
"PA15:EVENTOUT" : 15,
"PA15:I2S3_WS" : 6,
"PA15:JTDI" : 0,
"PA15:SPI1_NSS" : 5,
"PA15:SPI3_NSS" : 6,
"PA15:TIM2_CH1" : 1,
"PA15:TIM2_ETR" : 1,
"PA1:ETH_MII_RX_CLK" : 11,
"PA1:ETH_RMII_REF_CLK" : 11,
"PA1:EVENTOUT" : 15,
"PA1:TIM2_CH2" : 1,
"PA1:TIM5_CH2" : 2,
"PA1:UART4_RX" : 8,
"PA1:USART2_RTS" : 7,
"PA2:ETH_MDIO" : 11,
"PA2:EVENTOUT" : 15,
"PA2:TIM2_CH3" : 1,
"PA2:TIM5_CH3" : 2,
"PA2:TIM9_CH1" : 3,
"PA2:USART2_TX" : 7,
"PA3:ETH_MII_COL" : 11,
"PA3:EVENTOUT" : 15,
"PA3:LCD_B5" : 14,
"PA3:OTG_HS_ULPI_D0" : 10,
"PA3:TIM2_CH4" : 1,
"PA3:TIM5_CH4" : 2,
"PA3:TIM9_CH2" : 3,
"PA3:USART2_RX" : 7,
"PA4:DCMI_HSYNC" : 13,
"PA4:EVENTOUT" : 15,
"PA4:I2S3_WS" : 6,
"PA4:LCD_VSYNC" : 14,
"PA4:OTG_HS_SOF" : 12,
"PA4:SPI1_NSS" : 5,
"PA4:SPI3_NSS" : 6,
"PA4:USART2_CK" : 7,
"PA5:EVENTOUT" : 15,
"PA5:OTG_HS_ULPI_CK" : 10,
"PA5:SPI1_SCK" : 5,
"PA5:TIM2_CH1" : 1,
"PA5:TIM2_ETR" : 1,
"PA5:TIM8_CH1N" : 3,
"PA6:DCMI_PIXCLK" : 13,
"PA6:EVENTOUT" : 15,
"PA6:LCD_G2" : 14,
"PA6:SPI1_MISO" : 5,
"PA6:TIM13_CH1" : 9,
"PA6:TIM1_BKIN" : 1,
"PA6:TIM3_CH1" : 2,
"PA6:TIM8_BKIN" : 3,
"PA7:ETH_MII_RX_DV" : 11,
"PA7:ETH_RMII_CRS_DV" : 11,
"PA7:EVENTOUT" : 15,
"PA7:SPI1_MOSI" : 5,
"PA7:TIM14_CH1" : 9,
"PA7:TIM1_CH1N" : 1,
"PA7:TIM3_CH2" : 2,
"PA7:TIM8_CH1N" : 3,
"PA8:EVENTOUT" : 15,
"PA8:I2C3_SCL" : 4,
"PA8:LCD_R6" : 14,
"PA8:MCO1" : 0,
"PA8:OTG_FS_SOF" : 10,
"PA8:TIM1_CH1" : 1,
"PA8:USART1_CK" : 7,
"PA9:DCMI_D0" : 13,
"PA9:EVENTOUT" : 15,
"PA9:I2C3_SMBA" : 4,
"PA9:TIM1_CH2" : 1,
"PA9:USART1_TX" : 7,
"PB0:ETH_MII_RXD2" : 11,
"PB0:EVENTOUT" : 15,
"PB0:LCD_R3" : 9,
"PB0:OTG_HS_ULPI_D1" : 10,
"PB0:TIM1_CH2N" : 1,
"PB0:TIM3_CH3" : 2,
"PB0:TIM8_CH2N" : 3,
"PB10:ETH_MII_RX_ER" : 11,
"PB10:EVENTOUT" : 15,
"PB10:I2C2_SCL" : 4,
"PB10:I2S2_CK" : 5,
"PB10:LCD_G4" : 14,
"PB10:OTG_HS_ULPI_D3" : 10,
"PB10:SPI2_SCK" : 5,
"PB10:TIM2_CH3" : 1,
"PB10:USART3_TX" : 7,
"PB11:ETH_MII_TX_EN" : 11,
"PB11:ETH_RMII_TX_EN" : 11,
"PB11:EVENTOUT" : 15,
"PB11:I2C2_SDA" : 4,
"PB11:LCD_G5" : 14,
"PB11:OTG_HS_ULPI_D4" : 10,
"PB11:TIM2_CH4" : 1,
"PB11:USART3_RX" : 7,
"PB12:CAN2_RX" : 9,
"PB12:ETH_MII_TXD0" : 11,
"PB12:ETH_RMII_TXD0" : 11,
"PB12:EVENTOUT" : 15,
"PB12:I2C2_SMBA" : 4,
"PB12:I2S2_WS" : 5,
"PB12:OTG_HS_ID" : 12,
"PB12:OTG_HS_ULPI_D5" : 10,
"PB12:SPI2_NSS" : 5,
"PB12:TIM1_BKIN" : 1,
"PB12:USART3_CK" : 7,
"PB13:CAN2_TX" : 9,
"PB13:ETH_MII_TXD1" : 11,
"PB13:ETH_RMII_TXD1" : 11,
"PB13:EVENTOUT" : 15,
"PB13:I2S2_CK" : 5,
"PB13:OTG_HS_ULPI_D6" : 10,
"PB13:SPI2_SCK" : 5,
"PB13:TIM1_CH1N" : 1,
"PB13:USART3_CTS" : 7,
"PB14:EVENTOUT" : 15,
"PB14:I2S2EXT_SD" : 6,
"PB14:OTG_HS_DM" : 12,
"PB14:SPI2_MISO" : 5,
"PB14:TIM12_CH1" : 9,
"PB14:TIM1_CH2N" : 1,
"PB14:TIM8_CH2N" : 3,
"PB14:USART3_RTS" : 7,
"PB15:EVENTOUT" : 15,
"PB15:I2S2_SD" : 5,
"PB15:OTG_HS_DP" : 12,
"PB15:RTC_REFIN" : 0,
"PB15:SPI2_MOSI" : 5,
"PB15:TIM12_CH2" : 9,
"PB15:TIM1_CH3N" : 1,
"PB15:TIM8_CH3N" : 3,
"PB1:ETH_MII_RXD3" : 11,
"PB1:EVENTOUT" : 15,
"PB1:LCD_R6" : 9,
"PB1:OTG_HS_ULPI_D2" : 10,
"PB1:TIM1_CH3N" : 1,
"PB1:TIM3_CH4" : 2,
"PB1:TIM8_CH3N" : 3,
"PB2:EVENTOUT" : 15,
"PB3:EVENTOUT" : 15,
"PB3:I2S3_CK" : 6,
"PB3:JTDO" : 0,
"PB3:SPI1_SCK" : 5,
"PB3:SPI3_SCK" : 6,
"PB3:TIM2_CH2" : 1,
"PB3:TRACESWO" : 0,
"PB4:EVENTOUT" : 15,
"PB4:I2S3EXT_SD" : 7,
"PB4:NJTRST" : 0,
"PB4:SPI1_MISO" : 5,
"PB4:SPI3_MISO" : 6,
"PB4:TIM3_CH1" : 2,
"PB5:CAN2_RX" : 9,
"PB5:DCMI_D10" : 13,
"PB5:ETH_PPS_OUT" : 11,
"PB5:EVENTOUT" : 15,
"PB5:FMC_SDCKE1" : 12,
"PB5:I2C1_SMBA" : 4,
"PB5:I2S3_SD" : 6,
"PB5:OTG_HS_ULPI_D7" : 10,
"PB5:SPI1_MOSI" : 5,
"PB5:SPI3_MOSI" : 6,
"PB5:TIM3_CH2" : 2,
"PB6:CAN2_TX" : 9,
"PB6:DCMI_D5" : 13,
"PB6:EVENTOUT" : 15,
"PB6:FMC_SDNE1" : 12,
"PB6:I2C1_SCL" : 4,
"PB6:TIM4_CH1" : 2,
"PB6:USART1_TX" : 7,
"PB7:DCMI_VSYNC" : 13,
"PB7:EVENTOUT" : 15,
"PB7:FMC_NL" : 12,
"PB7:I2C1_SDA" : 4,
"PB7:TIM4_CH2" : 2,
"PB7:USART1_RX" : 7,
"PB8:CAN1_RX" : 9,
"PB8:DCMI_D6" : 13,
"PB8:ETH_MII_TXD3" : 11,
"PB8:EVENTOUT" : 15,
"PB8:I2C1_SCL" : 4,
"PB8:LCD_B6" : 14,
"PB8:SDIO_D4" : 12,
"PB8:TIM10_CH1" : 3,
"PB8:TIM4_CH3" : 2,
"PB9:CAN1_TX" : 9,
"PB9:DCMI_D7" : 13,
"PB9:EVENTOUT" : 15,
"PB9:I2C1_SDA" : 4,
"PB9:I2S2_WS" : 5,
"PB9:LCD_B7" : 14,
"PB9:SDIO_D5" : 12,
"PB9:SPI2_NSS" : 5,
"PB9:TIM11_CH1" : 3,
"PB9:TIM4_CH4" : 2,
"PC0:EVENTOUT" : 15,
"PC0:FMC_SDNWE" : 12,
"PC0:OTG_HS_ULPI_STP" : 10,
"PC10:DCMI_D8" : 13,
"PC10:EVENTOUT" : 15,
"PC10:I2S3_CK" : 6,
"PC10:LCD_R2" : 14,
"PC10:SDIO_D2" : 12,
"PC10:SPI3_SCK" : 6,
"PC10:UART4_TX" : 8,
"PC10:USART3_TX" : 7,
"PC11:DCMI_D4" : 13,
"PC11:EVENTOUT" : 15,
"PC11:I2S3EXT_SD" : 5,
"PC11:SDIO_D3" : 12,
"PC11:SPI3_MISO" : 6,
"PC11:UART4_RX" : 8,
"PC11:USART3_RX" : 7,
"PC12:DCMI_D9" : 13,
"PC12:EVENTOUT" : 15,
"PC12:I2S3_SD" : 6,
"PC12:SDIO_CK" : 12,
"PC12:SPI3_MOSI" : 6,
"PC12:UART5_TX" : 8,
"PC12:USART3_CK" : 7,
"PC13:EVENTOUT" : 15,
"PC14:EVENTOUT" : 15,
"PC15:EVENTOUT" : 15,
"PC1:ETH_MDC" : 11,
"PC1:EVENTOUT" : 15,
"PC2:ETH_MII_TXD2" : 11,
"PC2:EVENTOUT" : 15,
"PC2:FMC_SDNE0" : 12,
"PC2:I2S2EXT_SD" : 6,
"PC2:OTG_HS_ULPI_DIR" : 10,
"PC2:SPI2_MISO" : 5,
"PC3:ETH_MII_TX_CLK" : 11,
"PC3:EVENTOUT" : 15,
"PC3:FMC_SDCKE0" : 12,
"PC3:I2S2_SD" : 5,
"PC3:OTG_HS_ULPI_NXT" : 10,
"PC3:SPI2_MOSI" : 5,
"PC4:ETH_MII_RXD0" : 11,
"PC4:ETH_RMII_RXD0" : 11,
"PC4:EVENTOUT" : 15,
"PC5:ETH_MII_RXD1" : 11,
"PC5:ETH_RMII_RXD1" : 11,
"PC5:EVENTOUT" : 15,
"PC6:DCMI_D0" : 13,
"PC6:EVENTOUT" : 15,
"PC6:I2S2_MCK" : 5,
"PC6:LCD_HSYNC" : 14,
"PC6:SDIO_D6" : 12,
"PC6:TIM3_CH1" : 2,
"PC6:TIM8_CH1" : 3,
"PC6:USART6_TX" : 8,
"PC7:DCMI_D1" : 13,
"PC7:EVENTOUT" : 15,
"PC7:I2S3_MCK" : 6,
"PC7:LCD_G6" : 14,
"PC7:SDIO_D7" : 12,
"PC7:TIM3_CH2" : 2,
"PC7:TIM8_CH2" : 3,
"PC7:USART6_RX" : 8,
"PC8:DCMI_D2" : 13,
"PC8:EVENTOUT" : 15,
"PC8:SDIO_D0" : 12,
"PC8:TIM3_CH3" : 2,
"PC8:TIM8_CH3" : 3,
"PC8:USART6_CK" : 8,
"PC9:DCMI_D3" : 13,
"PC9:EVENTOUT" : 15,
"PC9:I2C3_SDA" : 4,
"PC9:I2S_CKIN" : 5,
"PC9:MCO2" : 0,
"PC9:SDIO_D1" : 12,
"PC9:TIM3_CH4" : 2,
"PC9:TIM8_CH4" : 3,
"PD0:CAN1_RX" : 9,
"PD0:EVENTOUT" : 15,
"PD0:FMC_D2" : 12,
"PD10:EVENTOUT" : 15,
"PD10:FMC_D15" : 12,
"PD10:LCD_B3" : 14,
"PD10:USART3_CK" : 7,
"PD11:EVENTOUT" : 15,
"PD11:FMC_A16" : 12,
"PD11:USART3_CTS" : 7,
"PD12:EVENTOUT" : 15,
"PD12:FMC_A17" : 12,
"PD12:TIM4_CH1" : 2,
"PD12:USART3_RTS" : 7,
"PD13:EVENTOUT" : 15,
"PD13:FMC_A18" : 12,
"PD13:TIM4_CH2" : 2,
"PD14:EVENTOUT" : 15,
"PD14:FMC_D0" : 12,
"PD14:TIM4_CH3" : 2,
"PD15:EVENTOUT" : 15,
"PD15:FMC_D1" : 12,
"PD15:TIM4_CH4" : 2,
"PD1:CAN1_TX" : 9,
"PD1:EVENTOUT" : 15,
"PD1:FMC_D3" : 12,
"PD2:DCMI_D11" : 13,
"PD2:EVENTOUT" : 15,
"PD2:SDIO_CMD" : 12,
"PD2:TIM3_ETR" : 2,
"PD2:UART5_RX" : 8,
"PD3:DCMI_D5" : 13,
"PD3:EVENTOUT" : 15,
"PD3:FMC_CLK" : 12,
"PD3:I2S2_CK" : 5,
"PD3:LCD_G7" : 14,
"PD3:SPI2_SCK" : 5,
"PD3:USART2_CTS" : 7,
"PD4:EVENTOUT" : 15,
"PD4:FMC_NOE" : 12,
"PD4:USART2_RTS" : 7,
"PD5:EVENTOUT" : 15,
"PD5:FMC_NWE" : 12,
"PD5:USART2_TX" : 7,
"PD6:DCMI_D10" : 13,
"PD6:EVENTOUT" : 15,
"PD6:FMC_NWAIT" : 12,
"PD6:I2S3_SD" : 5,
"PD6:LCD_B2" : 14,
"PD6:SAI1_SD_A" : 6,
"PD6:SPI3_MOSI" : 5,
"PD6:USART2_RX" : 7,
"PD7:EVENTOUT" : 15,
"PD7:FMC_NCE2" : 12,
"PD7:FMC_NE1" : 12,
"PD7:USART2_CK" : 7,
"PD8:EVENTOUT" : 15,
"PD8:FMC_D13" : 12,
"PD8:USART3_TX" : 7,
"PD9:EVENTOUT" : 15,
"PD9:FMC_D14" : 12,
"PD9:USART3_RX" : 7,
"PE0:DCMI_D2" : 13,
"PE0:EVENTOUT" : 15,
"PE0:FMC_NBL0" : 12,
"PE0:TIM4_ETR" : 2,
"PE0:UART8_RX" : 8,
"PE10:EVENTOUT" : 15,
"PE10:FMC_D7" : 12,
"PE10:TIM1_CH2N" : 1,
"PE11:EVENTOUT" : 15,
"PE11:FMC_D8" : 12,
"PE11:LCD_G3" : 14,
"PE11:SPI4_NSS" : 5,
"PE11:TIM1_CH2" : 1,
"PE12:EVENTOUT" : 15,
"PE12:FMC_D9" : 12,
"PE12:LCD_B4" : 14,
"PE12:SPI4_SCK" : 5,
"PE12:TIM1_CH3N" : 1,
"PE13:EVENTOUT" : 15,
"PE13:FMC_D10" : 12,
"PE13:LCD_DE" : 14,
"PE13:SPI4_MISO" : 5,
"PE13:TIM1_CH3" : 1,
"PE14:EVENTOUT" : 15,
"PE14:FMC_D11" : 12,
"PE14:LCD_CLK" : 14,
"PE14:SPI4_MOSI" : 5,
"PE14:TIM1_CH4" : 1,
"PE15:" : 5,
"PE15:EVENTOUT" : 15,
"PE15:FMC_D12" : 12,
"PE15:LCD_R7" : 14,
"PE15:TIM1_BKIN" : 1,
"PE1:DCMI_D3" : 13,
"PE1:EVENTOUT" : 15,
"PE1:FMC_NBL1" : 12,
"PE1:UART8_TX" : 8,
"PE2:ETH_MII_TXD3" : 11,
"PE2:EVENTOUT" : 15,
"PE2:FMC_A23" : 12,
"PE2:SAI1_MCLK_A" : 6,
"PE2:SPI4_SCK" : 5,
"PE2:TRACECLK" : 0,
"PE3:EVENTOUT" : 15,
"PE3:FMC_A19" : 12,
"PE3:SAI1_SD_B" : 6,
"PE3:TRACED0" : 0,
"PE4:DCMI_D4" : 13,
"PE4:EVENTOUT" : 15,
"PE4:FMC_A20" : 12,
"PE4:LCD_B0" : 14,
"PE4:SAI1_FS_A" : 6,
"PE4:SPI4_NSS" : 5,
"PE4:TRACED1" : 0,
"PE5:DCMI_D6" : 13,
"PE5:EVENTOUT" : 15,
"PE5:FMC_A21" : 12,
"PE5:LCD_G0" : 14,
"PE5:SAI1_SCK_A" : 6,
"PE5:SPI4_MISO" : 5,
"PE5:TIM9_CH1" : 3,
"PE5:TRACED2" : 0,
"PE6:DCMI_D7" : 13,
"PE6:EVENTOUT" : 15,
"PE6:FMC_A22" : 12,
"PE6:LCD_G1" : 14,
"PE6:SAI1_SD_A" : 6,
"PE6:SPI4_MOSI" : 5,
"PE6:TIM9_CH2" : 3,
"PE6:TRACED3" : 0,
"PE7:EVENTOUT" : 15,
"PE7:FMC_D4" : 12,
"PE7:TIM1_ETR" : 1,
"PE7:UART7_RX" : 8,
"PE8:EVENTOUT" : 15,
"PE8:FMC_D5" : 12,
"PE8:TIM1_CH1N" : 1,
"PE8:UART7_TX" : 8,
"PE9:EVENTOUT" : 15,
"PE9:FMC_D6" : 12,
"PE9:TIM1_CH1" : 1,
"PF0:EVENTOUT" : 15,
"PF0:FMC_A0" : 12,
"PF0:I2C2_SDA" : 4,
"PF10:DCMI_D11" : 13,
"PF10:EVENTOUT" : 15,
"PF10:FMC_INTR" : 12,
"PF10:LCD_DE" : 14,
"PF11:DCMI_D12" : 13,
"PF11:EVENTOUT" : 15,
"PF11:FMC_SDNRAS" : 12,
"PF11:SPI5_MOSI" : 5,
"PF12:EVENTOUT" : 15,
"PF12:FMC_A6" : 12,
"PF13:EVENTOUT" : 15,
"PF13:FMC_A7" : 12,
"PF14:EVENTOUT" : 15,
"PF14:FMC_A8" : 12,
"PF15:EVENTOUT" : 15,
"PF15:FMC_A9" : 12,
"PF1:" : 3,
"PF1:EVENTOUT" : 15,
"PF1:FMC_A1" : 12,
"PF1:I2C2_SCL" : 4,
"PF2:EVENTOUT" : 15,
"PF2:FMC_A2" : 12,
"PF2:I2C2_SMBA" : 4,
"PF3:" : 4,
"PF3:EVENTOUT" : 15,
"PF3:FMC_A3" : 12,
"PF4:" : 4,
"PF4:EVENTOUT" : 15,
"PF4:FMC_A4" : 12,
"PF5:" : 4,
"PF5:EVENTOUT" : 15,
"PF5:FMC_A5" : 12,
"PF6:EVENTOUT" : 15,
"PF6:FMC_NIORD" : 12,
"PF6:SAI1_SD_B" : 6,
"PF6:SPI5_NSS" : 5,
"PF6:TIM10_CH1" : 3,
"PF6:UART7_RX" : 8,
"PF7:EVENTOUT" : 15,
"PF7:FMC_NREG" : 12,
"PF7:SAI1_MCLK_B" : 6,
"PF7:SPI5_SCK" : 5,
"PF7:TIM11_CH1" : 3,
"PF7:UART7_TX" : 8,
"PF8:EVENTOUT" : 15,
"PF8:FMC_NIOWR" : 12,
"PF8:SAI1_SCK_B" : 6,
"PF8:SPI5_MISO" : 5,
"PF8:TIM13_CH1" : 9,
"PF9:EVENTOUT" : 15,
"PF9:FMC_CD" : 12,
"PF9:SAI1_FS_B" : 6,
"PF9:SPI5_MOSI" : 5,
"PF9:TIM14_CH1" : 9,
"PG0:EVENTOUT" : 15,
"PG0:FMC_A10" : 12,
"PG10:DCMI_D2" : 13,
"PG10:EVENTOUT" : 15,
"PG10:FMC_NCE4_1" : 12,
"PG10:FMC_NE3" : 12,
"PG10:LCD_B2" : 14,
"PG10:LCD_G3" : 9,
"PG11:DCMI_D3" : 13,
"PG11:ETH_MII_TX_EN" : 11,
"PG11:ETH_RMII_TX_EN" : 11,
"PG11:EVENTOUT" : 15,
"PG11:FMC_NCE4_2" : 12,
"PG11:LCD_B3" : 14,
"PG12:EVENTOUT" : 15,
"PG12:FMC_NE4" : 12,
"PG12:LCD_B1" : 14,
"PG12:LCD_B4" : 9,
"PG12:SPI6_MISO" : 5,
"PG12:USART6_RTS" : 8,
"PG13:ETH_MII_TXD0" : 11,
"PG13:ETH_RMII_TXD0" : 11,
"PG13:EVENTOUT" : 15,
"PG13:FMC_A24" : 12,
"PG13:SPI6_SCK" : 5,
"PG13:USART6_CTS" : 8,
"PG14:ETH_MII_TXD1" : 11,
"PG14:ETH_RMII_TXD1" : 11,
"PG14:EVENTOUT" : 15,
"PG14:FMC_A25" : 12,
"PG14:SPI6_MOSI" : 5,
"PG14:USART6_TX" : 8,
"PG15:DCMI_D13" : 13,
"PG15:EVENTOUT" : 15,
"PG15:FMC_SDNCAS" : 12,
"PG15:USART6_CTS" : 8,
"PG1:EVENTOUT" : 15,
"PG1:FMC_A11" : 12,
"PG2:EVENTOUT" : 15,
"PG2:FMC_A12" : 12,
"PG3:EVENTOUT" : 15,
"PG3:FMC_A13" : 12,
"PG4:EVENTOUT" : 15,
"PG4:FMC_A14" : 12,
"PG4:FMC_BA0" : 12,
"PG5:EVENTOUT" : 15,
"PG5:FMC_A15" : 12,
"PG5:FMC_BA1" : 12,
"PG6:DCMI_D12" : 13,
"PG6:EVENTOUT" : 15,
"PG6:FMC_INT2" : 12,
"PG6:LCD_R7" : 14,
"PG7:DCMI_D13" : 13,
"PG7:EVENTOUT" : 15,
"PG7:FMC_INT3" : 12,
"PG7:LCD_CLK" : 14,
"PG7:USART6_CK" : 8,
"PG8:ETH_PPS_OUT" : 11,
"PG8:EVENTOUT" : 15,
"PG8:FMC_SDCLK" : 12,
"PG8:SPI6_NSS" : 5,
"PG8:USART6_RTS" : 8,
"PG9:DCMI_VSYNC(1)" : 13,
"PG9:EVENTOUT" : 15,
"PG9:FMC_NCE3" : 12,
"PG9:FMC_NE2" : 12,
"PG9:USART6_RX" : 8,
"PH0:EVENTOUT" : 15,
"PH10:DCMI_D1" : 13,
"PH10:EVENTOUT" : 15,
"PH10:FMC_D18" : 12,
"PH10:LCD_R4" : 14,
"PH10:TIM5_CH1" : 2,
"PH11:DCMI_D2" : 13,
"PH11:EVENTOUT" : 15,
"PH11:FMC_D19" : 12,
"PH11:LCD_R5" : 14,
"PH11:TIM5_CH2" : 2,
"PH12:DCMI_D3" : 13,
"PH12:EVENTOUT" : 15,
"PH12:FMC_D20" : 12,
"PH12:LCD_R6" : 14,
"PH12:TIM5_CH3" : 2,
"PH13:CAN1_TX" : 9,
"PH13:EVENTOUT" : 15,
"PH13:FMC_D21" : 12,
"PH13:LCD_G2" : 14,
"PH13:TIM8_CH1N" : 3,
"PH14:DCMI_D4" : 13,
"PH14:EVENTOUT" : 15,
"PH14:FMC_D22" : 12,
"PH14:LCD_G3" : 14,
"PH14:TIM8_CH2N" : 3,
"PH15:DCMI_D11" : 13,
"PH15:EVENTOUT" : 15,
"PH15:FMC_D23" : 12,
"PH15:LCD_G4" : 14,
"PH15:TIM8_CH3N" : 3,
"PH1:EVENTOUT" : 15,
"PH2:ETH_MII_CRS" : 11,
"PH2:EVENTOUT" : 15,
"PH2:FMC_SDCKE0" : 12,
"PH2:LCD_R0" : 14,
"PH3:ETH_MII_COL" : 11,
"PH3:EVENTOUT" : 15,
"PH3:FMC_SDNE0" : 12,
"PH3:LCD_R1" : 14,
"PH4:EVENTOUT" : 15,
"PH4:I2C2_SCL" : 4,
"PH4:OTG_HS_ULPI_NXT" : 10,
"PH5:EVENTOUT" : 15,
"PH5:FMC_SDNWE" : 12,
"PH5:I2C2_SDA" : 4,
"PH5:SPI5_NSS" : 5,
"PH6:DCMI_D8" : 13,
"PH6:FMC_SDNE1" : 12,
"PH6:I2C2_SMBA" : 4,
"PH6:SPI5_SCK" : 5,
"PH6:TIM12_CH1" : 9,
"PH7:DCMI_D9" : 13,
"PH7:ETH_MII_RXD3" : 11,
"PH7:FMC_SDCKE1" : 12,
"PH7:I2C3_SCL" : 4,
"PH7:SPI5_MISO" : 5,
"PH8:DCMI_HSYNC" : 13,
"PH8:EVENTOUT" : 15,
"PH8:FMC_D16" : 12,
"PH8:I2C3_SDA" : 4,
"PH8:LCD_R2" : 14,
"PH9:DCMI_D0" : 13,
"PH9:EVENTOUT" : 15,
"PH9:FMC_D17" : 12,
"PH9:I2C3_SMBA" : 4,
"PH9:LCD_R3" : 14,
"PH9:TIM12_CH2" : 9,
}
ADC1_map = {
# format is PIN : ADC1_CHAN
"PA0" : 0,
"PA1" : 1,
"PA2" : 2,
"PA3" : 3,
"PA4" : 4,
"PA5" : 5,
"PA6" : 6,
"PA7" : 7,
"PB0" : 8,
"PB1" : 9,
"PC0" : 10,
"PC1" : 11,
"PC2" : 12,
"PC3" : 13,
"PC4" : 14,
"PC5" : 15,
}
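# Illustrative lookup sketch (not part of the generated tables): hwdef build
# scripts consume these dictionaries to resolve DMA streams, alternate
# functions and ADC channels; the peripheral and pin names below are examples.
if __name__ == '__main__':
    print("SPI1_RX DMA options (controller, stream, channel):",
          DMA_Map["SPI1_RX"])
    print("PA9 as USART1_TX uses AF%u" % AltFunction_map["PA9:USART1_TX"])
    print("PC0 maps to ADC1 channel %u" % ADC1_map["PC0"])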
| gpl-3.0 | -2,235,886,264,404,956,400 | 25.972152 | 93 | 0.443355 | false |
pledra/odoo-product-configurator | product_configurator/models/product_config.py | 1 | 57284 | from ast import literal_eval
from odoo.addons import decimal_precision as dp
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError, UserError
from odoo.tools.misc import formatLang
class ProductConfigDomain(models.Model):
_name = 'product.config.domain'
_description = "Domain for Config Restrictions"
@api.multi
@api.depends('implied_ids')
def _get_trans_implied(self):
"Computes the transitive closure of relation implied_ids"
def linearize(domains):
trans_domains = domains
for domain in domains:
implied_domains = domain.implied_ids - domain
if implied_domains:
trans_domains |= linearize(implied_domains)
return trans_domains
for domain in self:
domain.trans_implied_ids = linearize(domain)
@api.multi
def compute_domain(self):
""" Returns a list of domains defined on a product.config.domain_line_ids
and all implied_ids"""
# TODO: Enable the usage of OR operators between implied_ids
# TODO: Add implied_ids sequence field to enforce order of operations
# TODO: Prevent circular dependencies
computed_domain = []
for domain in self:
lines = domain.trans_implied_ids.mapped('domain_line_ids').sorted()
if not lines:
continue
for line in lines[:-1]:
if line.operator == 'or':
computed_domain.append('|')
computed_domain.append(
(line.attribute_id.id,
line.condition,
line.value_ids.ids)
)
# ensure 2 operands follow the last operator
computed_domain.append(
(lines[-1].attribute_id.id,
lines[-1].condition,
lines[-1].value_ids.ids)
)
return computed_domain
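# Illustrative example (placeholder ids, not taken from the module): for a
# first line "color in [red, blue]" whose operator is 'or', followed by a
# line "size not in [XL]", compute_domain() returns a prefix-notation list
# such as ['|', (color_attr_id, 'in', [red_id, blue_id]),
# (size_attr_id, 'not in', [xl_id])], which is later evaluated by
# product.config.session.validate_domains_against_sels().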
name = fields.Char(
string='Name',
required=True,
size=256
)
domain_line_ids = fields.One2many(
comodel_name='product.config.domain.line',
inverse_name='domain_id',
string='Restrictions',
required=True,
copy=True
)
implied_ids = fields.Many2many(
comodel_name='product.config.domain',
relation='product_config_domain_implied_rel',
string='Inherited',
column1='domain_id',
column2='parent_id'
)
trans_implied_ids = fields.Many2many(
comodel_name='product.config.domain',
compute=_get_trans_implied,
column1='domain_id',
column2='parent_id',
string='Transitively inherits'
)
class ProductConfigDomainLine(models.Model):
_name = 'product.config.domain.line'
_order = 'sequence'
_description = "Domain Line for Config Restrictions"
def _get_domain_conditions(self):
operators = [
('in', 'In'),
('not in', 'Not In')
]
return operators
def _get_domain_operators(self):
andor = [
('and', 'And'),
('or', 'Or'),
]
return andor
attribute_id = fields.Many2one(
comodel_name='product.attribute',
string='Attribute',
required=True)
domain_id = fields.Many2one(
comodel_name='product.config.domain',
required=True,
string='Rule')
condition = fields.Selection(
selection=_get_domain_conditions,
string="Condition",
required=True)
value_ids = fields.Many2many(
comodel_name='product.attribute.value',
relation='product_config_domain_line_attr_rel',
column1='line_id',
column2='attribute_id',
string='Values',
required=True
)
operator = fields.Selection(
selection=_get_domain_operators,
string='Operators',
default='and',
required=True
)
sequence = fields.Integer(
string="Sequence",
default=1,
help="Set the order of operations for evaluation domain lines"
)
class ProductConfigLine(models.Model):
_name = 'product.config.line'
_description = "Product Config Restrictions"
# TODO: Prevent config lines having dependencies that are not set in other
# config lines
# TODO: Prevent circular dependencies: Length -> Color, Color -> Length
@api.onchange('attribute_line_id')
def onchange_attribute(self):
self.value_ids = False
self.domain_id = False
product_tmpl_id = fields.Many2one(
comodel_name='product.template',
string='Product Template',
ondelete='cascade',
required=True
)
attribute_line_id = fields.Many2one(
comodel_name='product.template.attribute.line',
string='Attribute Line',
ondelete='cascade',
required=True
)
# TODO: Find a more elegant way to restrict the value_ids
attr_line_val_ids = fields.Many2many(
comodel_name='product.attribute.value',
related='attribute_line_id.value_ids'
)
value_ids = fields.Many2many(
comodel_name='product.attribute.value',
id1="cfg_line_id",
id2="attr_val_id",
string="Values"
)
domain_id = fields.Many2one(
comodel_name='product.config.domain',
required=True,
string='Restrictions'
)
sequence = fields.Integer(string='Sequence', default=10)
_order = 'product_tmpl_id, sequence, id'
@api.multi
@api.constrains('value_ids')
def check_value_attributes(self):
for line in self.filtered(lambda l: l.value_ids):
value_attributes = line.value_ids.mapped('attribute_id')
if value_attributes != line.attribute_line_id.attribute_id:
raise ValidationError(
_("Values must belong to the attribute of the "
"corresponding attribute_line set on the configuration "
"line")
)
class ProductConfigImage(models.Model):
_name = 'product.config.image'
_description = "Product Config Image"
name = fields.Char('Name', size=128, required=True, translate=True)
product_tmpl_id = fields.Many2one(
comodel_name='product.template',
string='Product',
ondelete='cascade',
required=True
)
image = fields.Binary('Image', required=True)
sequence = fields.Integer(string='Sequence', default=10)
value_ids = fields.Many2many(
comodel_name='product.attribute.value',
string='Configuration'
)
_order = 'sequence'
@api.multi
@api.constrains('value_ids')
def _check_value_ids(self):
cfg_session_obj = self.env['product.config.session']
for cfg_img in self:
try:
cfg_session_obj.validate_configuration(
value_ids=cfg_img.value_ids.ids,
product_tmpl_id=cfg_img.product_tmpl_id.id,
final=False)
except ValidationError:
raise ValidationError(
_("Values entered for line '%s' generate "
"a incompatible configuration" % cfg_img.name)
)
class ProductConfigStep(models.Model):
_name = 'product.config.step'
_description = "Product Config Steps"
# TODO: Prevent values which have dependencies to be set in a
# step with higher sequence than the dependency
name = fields.Char(
string='Name',
size=128,
required=True,
translate=True
)
class ProductConfigStepLine(models.Model):
_name = 'product.config.step.line'
_description = "Product Config Step Lines"
name = fields.Char(related='config_step_id.name')
config_step_id = fields.Many2one(
comodel_name='product.config.step',
string='Configuration Step',
required=True
)
attribute_line_ids = fields.Many2many(
comodel_name='product.template.attribute.line',
relation='config_step_line_attr_id_rel',
column1='cfg_line_id',
column2='attr_id',
string='Attribute Lines'
)
product_tmpl_id = fields.Many2one(
comodel_name='product.template',
string='Product Template',
ondelete='cascade',
required=True
)
sequence = fields.Integer(
string='Sequence',
default=10
)
_order = 'sequence, config_step_id, id'
@api.constrains('config_step_id')
def _check_config_step(self):
for config_step in self:
cfg_step_lines = config_step.product_tmpl_id.config_step_line_ids
cfg_steps = cfg_step_lines.filtered(
lambda line: line != config_step).mapped('config_step_id')
if config_step.config_step_id in cfg_steps:
raise ValidationError(_(
'Cannot have a configuration step defined twice.'
))
class ProductConfigSession(models.Model):
_name = 'product.config.session'
_description = "Product Config Session"
@api.multi
@api.depends(
'value_ids', 'product_tmpl_id.list_price',
'product_tmpl_id.attribute_line_ids',
'product_tmpl_id.attribute_line_ids.value_ids',
'product_tmpl_id.attribute_line_ids.product_template_value_ids',
'product_tmpl_id.attribute_line_ids.'
'product_template_value_ids.price_extra')
def _compute_cfg_price(self):
for session in self:
if session.product_tmpl_id:
price = session.get_cfg_price()
else:
price = 0.00
session.price = price
def get_custom_value_id(self):
custom_ext_id = 'product_configurator.custom_attribute_value'
custom_val_id = self.env.ref(custom_ext_id)
return custom_val_id
@api.model
def _get_custom_vals_dict(self):
"""Retrieve session custom values as a dictionary of the form
{attribute_id: parsed_custom_value}"""
custom_vals = {}
for val in self.custom_value_ids:
if val.attribute_id.custom_type in ['float', 'int']:
custom_vals[val.attribute_id.id] = literal_eval(val.value)
elif val.attribute_id.custom_type == 'binary':
custom_vals[val.attribute_id.id] = val.attachment_ids
else:
custom_vals[val.attribute_id.id] = val.value
return custom_vals
@api.multi
def _compute_config_step_name(self):
"""Get the config.step.line name using the string stored in config_step
field of the session"""
cfg_step_line_obj = self.env['product.config.step.line']
cfg_session_step_lines = self.mapped('config_step')
cfg_step_line_ids = set()
for step in cfg_session_step_lines:
try:
cfg_step_line_ids.add(int(step))
except ValueError:
pass
cfg_step_lines = cfg_step_line_obj.browse(cfg_step_line_ids)
for session in self:
try:
config_step = int(session.config_step)
config_step_line = cfg_step_lines.filtered(
lambda x: x.id == config_step
)
session.config_step_name = config_step_line.name
except Exception:
pass
if not session.config_step_name:
session.config_step_name = session.config_step
@api.model
def get_cfg_weight(self, value_ids=None, custom_vals=None):
""" Computes the weight of the configured product based on the
configuration passed in via value_ids and custom_values
:param value_ids: list of attribute value_ids
:param custom_vals: dictionary of custom attribute values
:returns: final configuration weight"""
if value_ids is None:
value_ids = self.value_ids.ids
if custom_vals is None:
custom_vals = {}
product_tmpl = self.product_tmpl_id
self = self.with_context({'active_id': product_tmpl.id})
value_ids = self.flatten_val_ids(value_ids)
weight_extra = 0.0
product_attr_val_obj = self.env['product.template.attribute.value']
product_tmpl_attr_values = product_attr_val_obj.search([
('product_tmpl_id', 'in', product_tmpl.ids),
('product_attribute_value_id', 'in', value_ids)
])
for product_tmpl_attr_val in product_tmpl_attr_values:
weight_extra += product_tmpl_attr_val.weight_extra
return product_tmpl.weight + weight_extra
@api.multi
@api.depends(
'value_ids', 'product_tmpl_id',
'product_tmpl_id.attribute_line_ids',
'product_tmpl_id.attribute_line_ids.value_ids',
'product_tmpl_id.attribute_line_ids.product_template_value_ids',
'product_tmpl_id.attribute_line_ids.product_template_value_ids'
'.weight_extra')
def _compute_cfg_weight(self):
for cfg_session in self:
cfg_session.weight = cfg_session.get_cfg_weight()
@api.multi
def _compute_currency_id(self):
main_company = self.env['res.company']._get_main_company()
for session in self:
template = session.product_tmpl_id
session.currency_id = (
template.company_id.sudo().currency_id.id or
main_company.currency_id.id
)
name = fields.Char(
string='Configuration Session Number',
readonly=True
)
config_step = fields.Char(
string='Configuration Step ID'
)
config_step_name = fields.Char(
compute='_compute_config_step_name',
string="Configuration Step"
)
product_id = fields.Many2one(
comodel_name='product.product',
name='Configured Variant',
ondelete='cascade',
)
product_tmpl_id = fields.Many2one(
comodel_name='product.template',
domain=[('config_ok', '=', True)],
string='Configurable Template',
required=True
)
value_ids = fields.Many2many(
comodel_name='product.attribute.value',
relation='product_config_session_attr_values_rel',
column1='cfg_session_id',
column2='attr_val_id',
)
user_id = fields.Many2one(
comodel_name='res.users',
required=True,
string='User'
)
custom_value_ids = fields.One2many(
comodel_name='product.config.session.custom.value',
inverse_name='cfg_session_id',
string='Custom Values'
)
price = fields.Float(
compute='_compute_cfg_price',
string='Price',
store=True,
digits=dp.get_precision('Product Price')
)
currency_id = fields.Many2one(
comodel_name='res.currency',
string='Currency',
compute='_compute_currency_id'
)
state = fields.Selection(
string='State',
required=True,
selection=[
('draft', 'Draft'),
('done', 'Done')
],
default='draft'
)
weight = fields.Float(
string="Weight",
compute="_compute_cfg_weight",
digits=dp.get_precision('Stock Weight')
)
# Product preset
product_preset_id = fields.Many2one(
comodel_name='product.product',
string='Preset',
domain="[('product_tmpl_id', '=', product_tmpl_id),\
('config_preset_ok', '=', True)]"
)
@api.multi
def action_confirm(self, product_id=None):
for session in self:
if product_id is None:
product_id = session.create_get_variant()
session.write({
'state': 'done',
'product_id': product_id.id
})
return True
@api.constrains('state')
def _check_product_id(self):
for session in self.filtered(lambda s: s.state == 'done'):
if not session.product_id:
raise ValidationError(_(
"Finished configuration session must have a "
"product_id linked")
)
@api.multi
def update_session_configuration_value(self, vals, product_tmpl_id=None):
"""Update value of configuration in current session
:param: vals: Dictionary of fields(of configution wizard) and values
:param: product_tmpl_id: record set of preoduct template
:return: True/False
"""
self.ensure_one()
if not product_tmpl_id:
product_tmpl_id = self.product_tmpl_id
product_configurator_obj = self.env['product.configurator']
field_prefix = product_configurator_obj._prefixes.get('field_prefix')
custom_field_prefix = product_configurator_obj._prefixes.get(
'custom_field_prefix')
custom_val = self.get_custom_value_id()
attr_val_dict = {}
custom_val_dict = {}
for attr_line in product_tmpl_id.attribute_line_ids:
attr_id = attr_line.attribute_id.id
field_name = field_prefix + str(attr_id)
custom_field_name = custom_field_prefix + str(attr_id)
if field_name not in vals and custom_field_name not in vals:
continue
# Add attribute values from the client except custom attribute
# If a custom value is being written, but field name is not in
# the write dictionary, then it must be a custom value!
if vals.get(field_name, custom_val.id) != custom_val.id:
if attr_line.multi and isinstance(vals[field_name], list):
if not vals[field_name]:
field_val = None
else:
field_val = vals[field_name][0][2]
elif not attr_line.multi and isinstance(vals[field_name], int):
field_val = vals[field_name]
else:
raise UserError(
_('An error occurred while parsing value for '
'attribute %s' % attr_line.attribute_id.name)
)
attr_val_dict.update({
attr_id: field_val
})
# Ensure there is no custom value stored if we have switched
# from custom value to selected attribute value.
if attr_line.custom:
custom_val_dict.update({attr_id: False})
elif attr_line.custom:
val = vals.get(custom_field_name, False)
if attr_line.attribute_id.custom_type == 'binary':
# TODO: Add widget that enables multiple file uploads
val = [{
'name': 'custom',
'datas': vals[custom_field_name]
}]
custom_val_dict.update({
attr_id: val
})
# Ensure there is no standard value stored if we have switched
# from selected value to custom value.
attr_val_dict.update({attr_id: False})
self.update_config(attr_val_dict, custom_val_dict)
@api.multi
def update_config(self, attr_val_dict=None, custom_val_dict=None):
"""Update the session object with the given value_ids and custom values.
Use this method instead of write in order to prevent incompatible
configurations, as it removes duplicate values for the same attribute.
:param attr_val_dict: Dictionary of the form {
int (attribute_id): attribute_value_id OR [attribute_value_ids]
}
:custom_val_dict: Dictionary of the form {
int (attribute_id): {
'value': 'custom val',
OR
'attachment_ids': {
[{
'name': 'attachment name',
'datas': base64_encoded_string
}]
}
}
}
"""
if attr_val_dict is None:
attr_val_dict = {}
if custom_val_dict is None:
custom_val_dict = {}
update_vals = {}
value_ids = self.value_ids.ids
for attr_id, vals in attr_val_dict.items():
attr_val_ids = self.value_ids.filtered(
lambda x: x.attribute_id.id == int(attr_id)).ids
# Remove all values for this attribute and add vals from dict
value_ids = list(set(value_ids) - set(attr_val_ids))
if not vals:
continue
if isinstance(vals, list):
value_ids += vals
elif isinstance(vals, int):
value_ids.append(vals)
if value_ids != self.value_ids.ids:
update_vals.update({
'value_ids': [(6, 0, value_ids)]
})
# Remove all custom values included in the custom_vals dict
self.custom_value_ids.filtered(
lambda x: x.attribute_id.id in custom_val_dict.keys()).unlink()
if custom_val_dict:
binary_field_ids = self.env['product.attribute'].search([
('id', 'in', list(custom_val_dict.keys())),
('custom_type', '=', 'binary')
]).ids
for attr_id, vals in custom_val_dict.items():
if not vals:
continue
if 'custom_value_ids' not in update_vals:
update_vals['custom_value_ids'] = []
custom_vals = {'attribute_id': attr_id}
if attr_id in binary_field_ids:
attachments = [(0, 0, {
'name': val.get('name'),
'datas_fname': val.get('name'),
'datas': val.get('datas')
}) for val in vals]
custom_vals.update({'attachment_ids': attachments})
else:
custom_vals.update({'value': vals})
update_vals['custom_value_ids'].append((0, 0, custom_vals))
self.write(update_vals)
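# Illustrative call sketch (ids below are placeholders, not real records):
#
#     session.update_config(
#         attr_val_dict={color_attr_id: red_val_id,
#                        option_attr_id: [opt1_val_id, opt2_val_id]},
#         custom_val_dict={length_attr_id: '120.5'},
#     )
#
# This replaces any previously selected values for those attributes and
# rewrites the related custom value lines in a single write().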
@api.multi
def write(self, vals):
"""Validate configuration when writing new values to session"""
# TODO: Issue warning when writing to value_ids or custom_val_ids
res = super(ProductConfigSession, self).write(vals)
value_ids = self.value_ids.ids
avail_val_ids = self.values_available(value_ids)
if set(value_ids) - set(avail_val_ids):
self.value_ids = [(6, 0, avail_val_ids)]
try:
self.validate_configuration(final=False)
except ValidationError as ex:
raise ValidationError(ex)
except Exception:
raise ValidationError(_('Invalid Configuration'))
return res
@api.model
def create(self, vals):
vals['name'] = self.env['ir.sequence'].next_by_code(
'product.config.session') or _('New')
product_tmpl = self.env['product.template'].browse(
vals.get('product_tmpl_id')).exists()
if product_tmpl:
default_val_ids = product_tmpl.attribute_line_ids.filtered(
lambda l: l.default_val).mapped('default_val').ids
value_ids = vals.get('value_ids')
if value_ids:
default_val_ids += value_ids[0][2]
try:
self.validate_configuration(
value_ids=default_val_ids, final=False,
product_tmpl_id=product_tmpl.id
)
# TODO: Remove if cond when PR with
# raise error on github is merged
except ValidationError as ex:
raise ValidationError(ex)
except Exception:
raise ValidationError(
_('Default values provided generate an invalid '
'configuration')
)
vals.update({'value_ids': [(6, 0, default_val_ids)]})
return super(ProductConfigSession, self).create(vals)
@api.multi
def create_get_variant(self, value_ids=None, custom_vals=None):
""" Creates a new product variant with the attributes passed
via value_ids and custom_values or retrieves an existing
one based on search result
:param value_ids: list of product.attribute.values ids
:param custom_vals: dict {product.attribute.id: custom_value}
:returns: new/existing product.product recordset
"""
if self.product_tmpl_id.config_ok:
self.validate_configuration()
if value_ids is None:
value_ids = self.value_ids.ids
if custom_vals is None:
custom_vals = self._get_custom_vals_dict()
try:
self.validate_configuration()
except ValidationError as ex:
raise ValidationError(ex)
except Exception:
raise ValidationError(_('Invalid Configuration'))
duplicates = self.search_variant(
value_ids=value_ids, custom_vals=custom_vals)
# At the moment, I don't have enough confidence with my understanding
# of binary attributes, so will leave these as not matching...
# In theory, they should just work, if they are set to "non search"
# in custom field def!
# TODO: Check the logic with binary attributes
if custom_vals:
value_custom_ids = self.encode_custom_values(custom_vals)
if any('attachment_ids' in cv[2] for cv in value_custom_ids):
duplicates = False
if duplicates:
return duplicates[:1]
vals = self.get_variant_vals(value_ids, custom_vals)
product_obj = self.env['product.product'].sudo().with_context(
mail_create_nolog=True
)
variant = product_obj.sudo().create(vals)
variant.message_post(
body=_('Product created via configuration wizard'),
author_id=self.env.user.partner_id.id
)
return variant
@api.multi
def _get_option_values(self, pricelist, value_ids=None):
"""Return only attribute values that have products attached with a
price set to them"""
if value_ids is None:
value_ids = self.value_ids.ids
value_obj = self.env['product.attribute.value'].with_context({
'pricelist': pricelist.id})
values = value_obj.sudo().browse(value_ids).filtered(
lambda x: x.product_id.price)
return values
@api.multi
def get_components_prices(self, prices, pricelist, value_ids=None):
"""Return prices of the components which make up the final
configured variant"""
if value_ids is None:
value_ids = self.value_ids.ids
vals = self._get_option_values(pricelist, value_ids)
for val in vals:
prices['vals'].append(
(val.attribute_id.name,
val.product_id.name,
val.product_id.price)
)
product = val.product_id.with_context({'pricelist': pricelist.id})
product_prices = product.taxes_id.sudo().compute_all(
price_unit=product.price,
currency=pricelist.currency_id,
quantity=1,
product=self,
partner=self.env.user.partner_id
)
total_included = product_prices['total_included']
taxes = total_included - product_prices['total_excluded']
prices['taxes'] += taxes
prices['total'] += total_included
return prices
@api.model
def get_cfg_price(self, value_ids=None, custom_vals=None):
""" Computes the price of the configured product based on the configuration
passed in via value_ids and custom_values
:param value_ids: list of attribute value_ids
:param custom_vals: dictionary of custom attribute values
:returns: final configuration price"""
if value_ids is None:
value_ids = self.value_ids.ids
if custom_vals is None:
custom_vals = {}
product_tmpl = self.product_tmpl_id
self = self.with_context({'active_id': product_tmpl.id})
value_ids = self.flatten_val_ids(value_ids)
price_extra = 0.0
attr_val_obj = self.env['product.attribute.value']
av_ids = attr_val_obj.browse(value_ids)
extra_prices = attr_val_obj.get_attribute_value_extra_prices(
product_tmpl_id=product_tmpl.id,
pt_attr_value_ids=av_ids,
)
price_extra = sum(extra_prices.values())
return product_tmpl.list_price + price_extra
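# Illustrative example (figures are placeholders): with a template list
# price of 100.0 and selected attribute values carrying price_extra of 10.0
# and 5.0, get_cfg_price() returns 115.0.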
def _get_config_image(
self, value_ids=None, custom_vals=None, size=None):
"""
Retrieve the image record that most closely matches the configuration
passed via the value_ids list.
The default image object is the product template itself.
:param value_ids: a list representing the ids of attribute values
(usually stored in the user's session)
:param custom_vals: dictionary of custom attribute values
:returns: record holding the image that best matches the configuration
"""
# TODO: Also consider custom values for image change
if value_ids is None:
value_ids = self.value_ids.ids
if custom_vals is None:
custom_vals = self._get_custom_vals_dict()
img_obj = self.product_tmpl_id
max_matches = 0
value_ids = self.flatten_val_ids(value_ids)
for line in self.product_tmpl_id.config_image_ids:
matches = len(set(line.value_ids.ids) & set(value_ids))
if matches > max_matches:
img_obj = line
max_matches = matches
return img_obj
def get_config_image(
self, value_ids=None, custom_vals=None, size=None):
"""
Retrieve the image that most closely matches the configuration
passed via the value_ids list and return its binary data.
For more information check _get_config_image
"""
config_image_id = self._get_config_image(
value_ids=value_ids,
custom_vals=custom_vals
)
return config_image_id.image
@api.model
def get_variant_vals(self, value_ids=None, custom_vals=None, **kwargs):
""" Hook to alter the values of the product variant before creation
:param value_ids: list of product.attribute.values ids
:param custom_vals: dict {product.attribute.id: custom_value}
:returns: dictionary of values to pass to product.create() method
"""
self.ensure_one()
if value_ids is None:
value_ids = self.value_ids.ids
if custom_vals is None:
custom_vals = self._get_custom_vals_dict()
image = self.get_config_image(value_ids)
vals = {
'product_tmpl_id': self.product_tmpl_id.id,
'attribute_value_ids': [(6, 0, value_ids)],
'taxes_id': [(6, 0, self.product_tmpl_id.taxes_id.ids)],
'image': image,
}
if custom_vals:
vals.update({
'value_custom_ids': self.encode_custom_values(custom_vals)
})
return vals
@api.multi
def get_session_search_domain(self, product_tmpl_id, state='draft',
parent_id=None, user_id=None):
if not user_id:
user_id = self.env.uid
domain = [
('product_tmpl_id', '=', product_tmpl_id),
('user_id', '=', user_id),
('state', '=', state),
]
if parent_id:
domain.append(('parent_id', '=', parent_id))
return domain
@api.multi
def get_session_vals(self, product_tmpl_id, parent_id=None, user_id=None):
if not user_id:
user_id = self.env.user.id
vals = {
'product_tmpl_id': product_tmpl_id,
'user_id': user_id,
}
if parent_id:
vals.update(parent_id=parent_id)
return vals
@api.multi
def get_next_step(self, state, product_tmpl_id=False,
value_ids=False, custom_value_ids=False):
"""Find and return next step if exit. This usually
implies the next configuration step (if any) defined via the
config_step_line_ids on the product.template.
"""
if not product_tmpl_id:
product_tmpl_id = self.product_tmpl_id
if value_ids is False:
value_ids = self.value_ids
if custom_value_ids is False:
custom_value_ids = self.custom_value_ids
if not state:
state = self.config_step
cfg_step_lines = product_tmpl_id.config_step_line_ids
if not cfg_step_lines:
if (value_ids or custom_value_ids)\
and not state == 'select':
return False
elif not (value_ids or custom_value_ids)\
and not state == 'select':
raise UserError(_(
"You must select at least one "
"attribute in order to configure a product"
))
else:
return 'configure'
adjacent_steps = self.get_adjacent_steps()
next_step = adjacent_steps.get('next_step')
open_step_lines = list(map(
lambda x: '%s' % (x),
self.get_open_step_lines().ids
))
session_config_step = self.config_step
if (session_config_step and
state != session_config_step and
session_config_step in open_step_lines):
next_step = self.config_step
else:
next_step = str(next_step.id) if next_step else None
if next_step:
pass
elif not (value_ids or custom_value_ids):
raise UserError(_(
"You must select at least one "
"attribute in order to configure a product"
))
else:
return False
return next_step
# TODO: Should be renamed to get_active_step_line
@api.model
def get_active_step(self):
"""Attempt to return product.config.step.line object that has the id
of the config session step stored as string"""
cfg_step_line_obj = self.env['product.config.step.line']
try:
cfg_step_line_id = int(self.config_step)
except ValueError:
cfg_step_line_id = None
if cfg_step_line_id:
return cfg_step_line_obj.browse(cfg_step_line_id)
return cfg_step_line_obj
@api.model
def get_open_step_lines(self, value_ids=None):
"""
Returns a recordset of configuration step lines open for access given
the configuration passed through value_ids
e.g.: Fields A and B from configuration step 2 depend on Field C
from configuration step 1. Since fields A and B require action from
the previous step, configuration step 2 is deemed closed and the user
is redirected to configuration step 1.
:param value_ids: list of value.ids representing the
current configuration
:returns: recordset of accessible configuration steps
"""
if value_ids is None:
value_ids = self.value_ids.ids
open_step_lines = self.env['product.config.step.line']
for cfg_line in self.product_tmpl_id.config_step_line_ids:
for attr_line in cfg_line.attribute_line_ids:
available_vals = self.values_available(
attr_line.value_ids.ids, value_ids
)
# TODO: Refactor when adding restriction to custom values
if available_vals or attr_line.custom:
open_step_lines |= cfg_line
break
return open_step_lines.sorted()
@api.model
def get_all_step_lines(self, product_tmpl_id=None):
"""
Returns a recordset of configuration step lines of product_tmpl_id
:param product_tmpl_id: record-set of product.template
:returns: recordset of all configuration steps
"""
if not product_tmpl_id:
product_tmpl_id = self.product_tmpl_id
open_step_lines = product_tmpl_id.config_step_line_ids
return open_step_lines.sorted()
@api.model
def get_adjacent_steps(self, value_ids=None, active_step_line_id=None):
"""Returns the previous and next steps given the configuration passed
via value_ids and the active step line passed via cfg_step_line_id."""
# If there is no open step return empty dictionary
if value_ids is None:
value_ids = self.value_ids.ids
if not active_step_line_id:
active_step_line_id = self.get_active_step().id
config_step_lines = self.product_tmpl_id.config_step_line_ids
if not config_step_lines:
return {}
active_cfg_step_line = config_step_lines.filtered(
lambda l: l.id == active_step_line_id)
open_step_lines = self.get_open_step_lines(value_ids)
if not active_cfg_step_line:
return {'next_step': open_step_lines[0]}
nr_steps = len(open_step_lines)
adjacent_steps = {}
for i, cfg_step in enumerate(open_step_lines):
if cfg_step == active_cfg_step_line:
adjacent_steps.update({
'next_step':
None if i + 1 == nr_steps else open_step_lines[i + 1],
'previous_step': None if i == 0 else open_step_lines[i - 1]
})
return adjacent_steps
def check_and_open_incomplete_step(self, value_ids=None,
custom_value_ids=None):
""" Check and open incomplete step if any
:param value_ids: recordset of product.attribute.value
"""
if value_ids is None:
value_ids = self.value_ids
if custom_value_ids is None:
custom_value_ids = self.custom_value_ids
custom_attr_selected = custom_value_ids.mapped('attribute_id')
open_step_lines = self.get_open_step_lines()
step_to_open = False
for step in open_step_lines:
unset_attr_line = step.attribute_line_ids.filtered(
lambda attr_line:
attr_line.required and
not any([value in value_ids for value in attr_line.value_ids])
and not (
attr_line.custom and
attr_line.attribute_id in custom_attr_selected
)
)
check_val_ids = unset_attr_line.mapped('value_ids')
avail_val_ids = self.values_available(
check_val_ids.ids,
value_ids.ids,
product_tmpl_id=self.product_tmpl_id
)
if unset_attr_line and avail_val_ids:
step_to_open = step
break
if step_to_open:
return '%s' % (step_to_open.id)
return False
@api.model
def get_variant_search_domain(
self, product_tmpl_id=None, value_ids=None, custom_vals=None):
"""Method called by search_variant used to search duplicates in the
database"""
if custom_vals is None:
custom_vals = self._get_custom_vals_dict()
if value_ids is None:
value_ids = self.value_ids.ids
attr_obj = self.env['product.attribute']
domain = [
('product_tmpl_id', '=', product_tmpl_id),
('config_ok', '=', True)
]
for value_id in value_ids:
domain.append(('attribute_value_ids', '=', value_id))
attr_search = attr_obj.search([
('search_ok', '=', True),
('custom_type', 'not in', attr_obj._get_nosearch_fields())
])
for attr_id, value in custom_vals.items():
if attr_id not in attr_search.ids:
domain.append(
('value_custom_ids.attribute_id', '!=', int(attr_id)))
else:
domain.append(
('value_custom_ids.attribute_id', '=', int(attr_id)))
domain.append(('value_custom_ids.value', '=', value))
return domain
def validate_domains_against_sels(
self, domains, value_ids=None, custom_vals=None):
if custom_vals is None:
custom_vals = self._get_custom_vals_dict()
if value_ids is None:
value_ids = self.value_ids.ids
# process domains as shown in this wikipedia pseudocode:
# https://en.wikipedia.org/wiki/Polish_notation#Order_of_operations
stack = []
for domain in reversed(domains):
if type(domain) == tuple:
# evaluate operand and push to stack
if domain[1] == 'in':
if not set(domain[2]) & set(value_ids):
stack.append(False)
continue
else:
if set(domain[2]) & set(value_ids):
stack.append(False)
continue
stack.append(True)
else:
# evaluate operator and previous 2 operands
# compute_domain() only inserts 'or' operators
# compute_domain() enforces 2 operands per operator
operand1 = stack.pop()
operand2 = stack.pop()
stack.append(operand1 or operand2)
# 'and' operator is implied for remaining stack elements
avail = True
while stack:
avail &= stack.pop()
return avail
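# Illustrative walk-through (placeholder ids): for the prefix-notation
# domains ['|', (attr_a, 'in', [1, 2]), (attr_b, 'in', [3])] and
# value_ids = [2], the two operands evaluate to True and False, the '|'
# pops both and pushes True, and the remaining implicit 'and' leaves the
# final result True.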
@api.model
def values_available(
self, check_val_ids=None, value_ids=None,
custom_vals=None, product_tmpl_id=None):
"""Determines whether the attr_values from the product_template
are available for selection given the configuration ids and the
dependencies set on the product template
:param check_val_ids: list of attribute value ids to check for
availability
:param value_ids: list of attribute value ids
:param custom_vals: custom values dict {attr_id: custom_val}
:returns: list of available attribute values
"""
if check_val_ids is None:
check_val_ids = self.value_ids.ids
elif check_val_ids:
check_val_ids = check_val_ids.copy()
if not self.product_tmpl_id:
product_tmpl = self.env['product.template'].browse(product_tmpl_id)
else:
product_tmpl = self.product_tmpl_id
product_tmpl.ensure_one()
if value_ids is None:
value_ids = self.value_ids.ids
elif value_ids:
value_ids = value_ids.copy()
if custom_vals is None:
custom_vals = self._get_custom_vals_dict()
avail_val_ids = []
for attr_val_id in check_val_ids:
config_lines = product_tmpl.config_line_ids.filtered(
lambda l: attr_val_id in l.value_ids.ids
)
domains = config_lines.mapped('domain_id').compute_domain()
avail = self.validate_domains_against_sels(
domains, value_ids, custom_vals
)
if avail:
avail_val_ids.append(attr_val_id)
elif attr_val_id in value_ids:
value_ids.remove(attr_val_id)
return avail_val_ids
@api.model
def validate_configuration(
self, value_ids=None, custom_vals=None,
product_tmpl_id=False, final=True):
""" Verifies if the configuration values passed via value_ids and custom_vals
are valid
:param value_ids: list of attribute value ids
:param custom_vals: custom values dict {attr_id: custom_val}
:param final: boolean marker to check required attributes.
pass false to check non-final configurations
:returns: True if the configuration is valid; otherwise a
ValidationError is raised describing the failure
"""
# TODO: Raise ConfigurationError with reason
# Check if required values are missing for final configuration
if value_ids is None:
value_ids = self.value_ids.ids
if product_tmpl_id:
product_tmpl = self.env['product.template'].browse(product_tmpl_id)
else:
product_tmpl = self.product_tmpl_id
product_tmpl.ensure_one()
if custom_vals is None:
custom_vals = self._get_custom_vals_dict()
open_step_lines = self.get_open_step_lines()
attribute_line_ids = open_step_lines.mapped('attribute_line_ids')
for line in attribute_line_ids:
# Validate custom values
attr = line.attribute_id
if attr.id in custom_vals:
attr.validate_custom_val(custom_vals[attr.id])
if final:
common_vals = set(value_ids) & set(line.value_ids.ids)
custom_val = custom_vals.get(attr.id)
avail_val_ids = self.values_available(
line.value_ids.ids,
value_ids,
product_tmpl_id=self.product_tmpl_id
)
if line.required and avail_val_ids and not common_vals and\
not custom_val:
# TODO: Verify custom value type to be correct
raise ValidationError(_(
"Required attribute '%s' is empty" % (attr.name)))
# Check that all the values passed are not restricted
avail_val_ids = self.values_available(
value_ids, value_ids, product_tmpl_id=product_tmpl_id
)
if set(value_ids) - set(avail_val_ids):
restrict_val = list(set(value_ids) - set(avail_val_ids))
product_att_values = self.env['product.attribute.value'].browse(
restrict_val)
group_by_attr = {}
for val in product_att_values:
if val.attribute_id in group_by_attr:
group_by_attr[val.attribute_id] += val
else:
group_by_attr[val.attribute_id] = val
message = 'The following values are not available:'
for attr, val in group_by_attr.items():
message += '\n%s: %s' % (
attr.name, ', '.join(val.mapped('name'))
)
raise ValidationError(_(message))
# Check if custom values are allowed
custom_attr_ids = product_tmpl.attribute_line_ids.filtered(
'custom').mapped('attribute_id').ids
if not set(custom_vals.keys()) <= set(custom_attr_ids):
custom_attrs_with_error = list(
set(custom_vals.keys()) - set(custom_attr_ids))
custom_attrs_with_error = self.env['product.attribute'].browse(
custom_attrs_with_error)
error_message = _(
"The following custom values are not permitted "
"according to the product template - %s.\n\nIt is possible "
"that a change has been made to allowed custom values "
"while your configuration was in process. Please reset your "
"current session and start over or contact your administrator"
" in order to proceed."
)
message_vals = ""
for attr_id in custom_attrs_with_error:
message_vals += "\n%s: %s" % (
attr_id.name,
custom_vals.get(attr_id.id)
)
raise ValidationError(error_message % (message_vals))
# Check if there are multiple values passed for non-multi attributes
mono_attr_lines = product_tmpl.attribute_line_ids.filtered(
lambda l: not l.multi)
attrs_with_error = {}
for line in mono_attr_lines:
if len(set(line.value_ids.ids) & set(value_ids)) > 1:
wrong_vals = self.env['product.attribute.value'].browse(
set(line.value_ids.ids) & set(value_ids)
)
attrs_with_error[line.attribute_id] = wrong_vals
if attrs_with_error:
error_message = _(
"The following multi values are not permitted "
"according to the product template - %s.\n\nIt is possible "
"that a change has been made to allowed multi values "
"while your configuration was in process. Please reset your "
"current session and start over or contact your administrator"
" in order to proceed."
)
message_vals = ""
for attr_id, vals in attrs_with_error.items():
message_vals += "\n%s: %s" % (
attr_id.name,
', '.join(vals.mapped('name'))
)
raise ValidationError(error_message % (message_vals))
return True
@api.model
def search_variant(
self, value_ids=None, custom_vals=None, product_tmpl_id=None):
""" Searches product.variants with given value_ids and custom values
given in the custom_vals dict
:param value_ids: list of product.attribute.values ids
:param custom_vals: dict {product.attribute.id: custom_value}
:returns: product.product recordset of products matching domain
"""
if value_ids is None:
value_ids = self.value_ids.ids
if custom_vals is None:
custom_vals = self._get_custom_vals_dict()
if not product_tmpl_id:
session_template = self.product_tmpl_id
if not session_template:
raise ValidationError(_(
'Cannot conduct search on an empty config session without '
'product_tmpl_id kwarg')
)
product_tmpl_id = self.product_tmpl_id.id
domain = self.get_variant_search_domain(
product_tmpl_id=product_tmpl_id,
value_ids=value_ids,
custom_vals=custom_vals
)
products = self.env['product.product'].search(domain)
# At this point, we might have found products with all of the passed
# in values, but they might have more attributes! These are NOT
# matches
more_attrs = products.filtered(
lambda p:
len(p.attribute_value_ids) != len(value_ids) or
len(p.value_custom_ids) != len(custom_vals)
)
products -= more_attrs
return products
@api.multi
def search_session(self, product_tmpl_id, parent_id=None, user_id=None):
domain = self.get_session_search_domain(
product_tmpl_id=product_tmpl_id,
parent_id=parent_id,
user_id=user_id
)
session = self.search(domain, order='create_date desc', limit=1)
return session
@api.model
def create_get_session(self, product_tmpl_id, parent_id=None,
force_create=False, user_id=None):
if not force_create:
session = self.search_session(
product_tmpl_id=product_tmpl_id,
parent_id=parent_id,
user_id=user_id,
)
if session:
return session
vals = self.get_session_vals(
product_tmpl_id=product_tmpl_id,
parent_id=parent_id,
user_id=user_id
)
return self.create(vals)
# TODO: Disallow duplicates
def flatten_val_ids(self, value_ids):
""" Return a list of value_ids from a list with a mix of ids
and list of ids (multiselection)
:param value_ids: list of value ids or mix of ids and list of ids
(e.g: [1, 2, 3, [4, 5, 6]])
:returns: flattened list of ids ([1, 2, 3, 4, 5, 6]) """
flat_val_ids = set()
for val in value_ids:
if not val:
continue
if isinstance(val, list):
flat_val_ids |= set(val)
elif isinstance(val, int):
flat_val_ids.add(val)
return list(flat_val_ids)
def formatPrices(self, prices=None, dp='Product Price'):
if prices is None:
prices = {}
dp = None
prices['taxes'] = formatLang(
self.env, prices['taxes'], monetary=True, dp=dp)
prices['total'] = formatLang(
self.env, prices['total'], monetary=True, dp=dp)
prices['vals'] = [
(v[0], v[1], formatLang(self.env, v[2], monetary=True, dp=dp))
for v in prices['vals']
]
return prices
@api.multi
def encode_custom_values(self, custom_vals):
""" Hook to alter the values of the custom values before creating or writing
:param custom_vals: dict {product.attribute.id: custom_value}
:returns: list of custom values compatible with write and create
"""
attr_obj = self.env['product.attribute']
binary_attribute_ids = attr_obj.search([
('custom_type', '=', 'binary')]).ids
custom_lines = []
        for key, val in custom_vals.items():
            # Use a separate dict for the line values so the custom_vals dict
            # being iterated over is not reassigned inside the loop.
            line_vals = {'attribute_id': key}
            # TODO: Is this extra check necessary, as we already perform
            # the same check in validate_configuration?
            attr_obj.browse(key).validate_custom_val(val)
            if key in binary_attribute_ids:
                line_vals.update({
                    'attachment_ids': [(6, 0, val.ids)]
                })
            else:
                line_vals.update({'value': val})
            custom_lines.append((0, 0, line_vals))
return custom_lines
class ProductConfigSessionCustomValue(models.Model):
_name = 'product.config.session.custom.value'
_rec_name = 'attribute_id'
_description = "Product Config Session Custom Value"
attribute_id = fields.Many2one(
comodel_name='product.attribute',
string='Attribute',
required=True
)
cfg_session_id = fields.Many2one(
comodel_name='product.config.session',
required=True,
ondelete='cascade',
string='Session'
)
value = fields.Char(
string='Value',
help='Custom value held as string',
)
attachment_ids = fields.Many2many(
comodel_name='ir.attachment',
relation='product_config_session_custom_value_attachment_rel',
column1='cfg_sesion_custom_val_id',
column2='attachment_id',
string='Attachments'
)
def eval(self):
"""Return custom value evaluated using the related custom field type"""
field_type = self.attribute_id.custom_type
if field_type == 'binary':
vals = self.attachment_ids.mapped('datas')
if len(vals) == 1:
return vals[0]
return vals
elif field_type == 'int':
return int(self.value)
elif field_type == 'float':
return float(self.value)
return self.value
@api.constrains('cfg_session_id', 'attribute_id')
def unique_attribute(self):
for custom_val in self:
if len(custom_val.cfg_session_id.custom_value_ids.filtered(
lambda x: x.attribute_id == custom_val.attribute_id)) > 1:
raise ValidationError(_(
"Configuration cannot have the same value inserted twice"
))
# @api.constrains('cfg_session_id.value_ids')
# def custom_only(self):
# """Verify that the attribute_id is not present in vals as well"""
# import ipdb;ipdb.set_trace()
# if self.cfg_session_id.value_ids.filtered(
# lambda x: x.attribute_id == self.attribute_id):
# raise ValidationError(
# _("Configuration cannot have a selected option and a custom "
# "value with the same attribute")
# )
@api.constrains('attachment_ids', 'value')
def check_custom_type(self):
for custom_val in self:
custom_type = custom_val.attribute_id.custom_type
if custom_val.value and custom_type == 'binary':
raise ValidationError(_(
"Attribute custom type is binary, attachments are the "
"only accepted values with this custom field type"
))
if custom_val.attachment_ids and custom_type != 'binary':
raise ValidationError(_(
"Attribute custom type must be 'binary' for saving "
"attachments to custom value"
))
| agpl-3.0 | 3,607,961,118,295,161,000 | 34.982412 | 85 | 0.560855 | false |
tealover/nova | nova/api/openstack/compute/schemas/v3/quota_sets.py | 26 | 1745 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
common_quota = {
'type': ['integer', 'string'],
'pattern': '^-?[0-9]+$',
# -1 is a flag value for unlimited
'minimum': -1
}
quota_resources = {
'instances': common_quota,
'cores': common_quota,
'ram': common_quota,
'floating_ips': common_quota,
'fixed_ips': common_quota,
'metadata_items': common_quota,
'key_pairs': common_quota,
'security_groups': common_quota,
'security_group_rules': common_quota,
'injected_files': common_quota,
'injected_file_content_bytes': common_quota,
'injected_file_path_bytes': common_quota,
'server_groups': common_quota,
'server_group_members': common_quota,
}
update_quota_set = copy.deepcopy(quota_resources)
update_quota_set.update({'force': parameter_types.boolean})
update = {
'type': 'object',
'properties': {
        'quota_set': {
            'type': 'object',
            'properties': update_quota_set,
'additionalProperties': False,
},
},
'required': ['quota_set'],
'additionalProperties': False,
}
| apache-2.0 | -3,126,493,495,415,688,700 | 29.614035 | 78 | 0.656734 | false |
amaurywalbert/twitter | graphs/n1/n5_co_follow_creating_network_with_ego_v1.5_5k_alters.py | 2 | 11246 | # -*- coding: latin1 -*-
################################################################################################
#
#
import datetime, sys, time, json, os, os.path, shutil, time, struct, random
import networkx as nx
import matplotlib.pyplot as plt
from math import*
reload(sys)
sys.setdefaultencoding('utf-8')
######################################################################################################################################################################
## Status - Version 1 - Build the N5 (co-follow) network from the collected data, following the instructions below:
## Version 1.1 - Try to fix the high memory consumption observed while creating the networks.
## - Fixed - Clear the graph
## Version 1.2 - Use the dataset with 500 random egos.
## Version 1.3 - Remove the part that records missing files... "partial missing"
## Load the alters' data into memory
##
##
## Version 1.5 - Runs the procedure only for egos with 3.5k to 5k alters
##
## WARNING - AT LEAST 8GB OF RAM REQUIRED
##
## # INPUT:
## - List of Egos (egos)
## - Followee set (alters) of each Ego - forms the set of Alters
## - Followee set (followees) of each Alter (ids)
##
## # ALGORITHM
## 0 - For each ego[i]:
## 1 - Initialize ego_[i] and all of its friends (alters[i][n]) as vertices of a graph - (hash table - ego+alters - vertices)
## 2 - For each element i in the vertex set (v[i]):
## 3 - For each element j in the vertex set (v[j]):
## 4 - With i != j:
## 5 - If there is no edge (v[i],v[j]) yet:
## 6 - Create an edge between (v[i],v[j]) with weight equal to the CSJ between their sets of friends
## 7 - Remove edges with weight equal to zero
##
######################################################################################################################################################################
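## A small worked example of the edge-weight rule above (toy sets, not read from the dataset):
##   friends(A) = {1, 2, 3, 4}   friends(B) = {3, 4, 5}
##   CSJ(A, B) = |{3, 4}| / |{1, 2, 3, 4, 5}| = 2 / 5 = 0.4  -> weight of the edge (A, B)
##   Edges whose CSJ is 0 (no friends in common) are removed in the final step.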
################################################################################################
# Function to convert the binary files into the specific format used to build the graph
# - Here the friends set of an alter is returned (alter = friend of the ego)
################################################################################################
def read_arq_bin(file): # The function receives the binary file
with open(file, 'r') as f:
f.seek(0,2)
tamanho = f.tell()
f.seek(0)
friends_set = set()
while f.tell() < tamanho:
buffer = f.read(user_struct.size)
friend = user_struct.unpack(buffer)
friends_set.add(long(friend[0]))
return friends_set
################################################################################################
# Function to compute the CSJ (Jaccard similarity coefficient) between two data sets
################################################################################################
def csj(a,b):
intersection = len(a.intersection(b))
union = len(a.union(b))
    # Compute the CSJ between the two sets, assigning 0 when the union of the sets is empty
if union != 0:
        result = intersection/float(union) # float(union) so the result lies in the interval [0,1]
else:
result = 0
return result
################################################################################################
# Function to save the graphs in the standard format expected as input by the detection algorithms
################################################################################################
def save_graph(ego, G): # The function receives the id of the current ego and the graph (edge list)
    with open(output_dir+str(ego)+".edge_list", 'wb') as graph:
        nx.write_weighted_edgelist(G,graph) # Write the edge list WITH WEIGHTS
G.clear()
################################################################################################
# Generate the networks - graphs
################################################################################################
def ego_net(ego,alters_set,l): # The function receives the ego id, the list of alters and the ordinal number of the current ego
    G=nx.Graph() # Start an UNDIRECTED graph
    G.clear()
    ti = datetime.datetime.now() # Start time of the graph construction
    ########################################### # Build a hash table with the data sets (friends) of the vertices (ego and all alters)
    vertices = {}
    vertices[ego] = alters_set # Add the Ego to the vertex set
    for alter in alters_set:
        try:
            alters_friends = read_arq_bin(alters_dir+str(alter)+".dat") # Call the function that converts the alter's friends set from the binary format into a python set
            vertices[alter] = alters_friends # Add the alter's data set to the hash table
        except IOError: # Exception handling - in case some alter file is missing,
            pass
    ###########################################
    print ("Building graph for ego n: "+str(l)+" - Number of vertices: "+str(len(vertices)))
    indice = 0
    ########################################### # Creating edges
    for i in vertices:
        indice +=1
        print ("Ego: "+str(l)+" - Checking edges for alter: "+str(indice)+"/"+str(len(vertices)))
        for j in vertices:
            if i != j:
                if not G.has_edge(i,j): ### If there is no edge between the two vertices yet
                    csj_i_j = csj(vertices[i],vertices[j]) # Compute the CSJ between the two sets
                    G.add_edge(i,j,weight=csj_i_j) # Create the edge
    ########################################### # Remove edges whose CSJ is equal to zero.
    ########################################### # Removal is left for this point because having those edges around is useful while the networks are being generated...
    print("Removing edges with weight 0...")
    for (u,v,d) in G.edges(data='weight'):
        if d==0:
            G.remove_edge(u,v)
    ###########################################
    tf = datetime.datetime.now() # End time of the graph construction for the current ego
    tp = tf - ti # Time spent building the graph
    print ("Edge list of graph "+str(l)+" built successfully. EGO: "+str(ego))
    print("Time to build the graph: "+str(tp))
    return G
######################################################################################################################################################################
######################################################################################################################################################################
#
# Main method of the program.
# Builds the co-follow network for each ego file found in the input directory.
#
######################################################################################################################################################################
######################################################################################################################################################################
def main():
    missing = set() # Set of missing users...
    l = 0  # Variable used to display the ordinal number of the ego whose graph is being built
    ti = datetime.datetime.now() # Start time of the creation process for all the graphs
    for file in os.listdir(egos_dir): # For each Ego file in the directory
        l+=1 # Increment the Ego counter
        ego = file.split(".dat") # Strip the extension from the user id in the file name
        ego = long(ego[0]) # Get the user id as a Long
        if not dictionary.has_key(ego):
            alters_set = read_arq_bin(egos_dir+file) # Call the function that converts the ego's friends set from the binary format into a python set
            n_friends = len(alters_set) # Variable that stores the size of the current user's set
            print("######################################################################")
            print ("Building graph for ego n: "+str(l)+" - Number of alters: "+str(n_friends))
            G = ego_net(ego,alters_set,l) # Start the graph (edge list) creation function for the current ego
            print
            print("Saving the graph...")
            save_graph(ego,G)
            G.clear()
            tp = datetime.datetime.now()
            tp = tp - ti
            print ("Elapsed time: "+str(tp))
            print("######################################################################")
        else:
            print ("Edge list already created for ego "+str(l)+": "+str(ego))
            print
    tf = datetime.datetime.now() # End time of the graph construction process
    t = tf - ti # Time spent on the graph creation process
    print("Total script time: "+str(t))
    print("Total number of missing users: "+str(len(missing)))
    print("######################################################################")
    print("Networks created!")
    print("######################################################################\n")
######################################################################################################################################################################
#
# PROGRAM START
#
######################################################################################################################################################################
######################################################################################################################
egos_dir = "/home/amaury/dataset/n1/egos_3k_a_5k_alters/bin/"###### Directory containing the Ego files
alters_dir = "/home/amaury/dataset/n1/alters/bin/" # Directory containing the Alter files
output_dir = "/home/amaury/graphs/n5/graphs_with_ego/" ################# Directory for storing the edge-list files
formato = 'l' ####################################### Long for the code ('l') and then the char array with X positions:
user_struct = struct.Struct(formato) ########################## Initialize the struct object so the specific format can be stored in the binary file
######################################################################################################################
#Create the directories for storing the files
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###### Initializing the dictionary - a hash table built from the files already created.
print
print("######################################################################")
print ("Creating hash table...")
dictionary = {} #################################################### {key:value} table to make it easier to look up users already processed
for file in os.listdir(output_dir):
user_id = file.split(".edge_list")
user_id = long(user_id[0])
dictionary[user_id] = user_id
print ("Tabela hash criada com sucesso...")
print("######################################################################\n")
#Run the main method
if __name__ == "__main__": main() | gpl-3.0 | -1,815,334,411,947,517,000 | 55.492386 | 166 | 0.468744 | false |
gems-uff/noworkflow | capture/noworkflow/now/persistence/models/graphs/trial_graph.py | 1 | 11588 | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Trial Graph Module"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import weakref
from collections import defaultdict
from future.utils import viewitems
from ....utils.data import DotDict
from .structures import prepare_cache
from .structures import Graph
Node = DotDict # pylint: disable=invalid-name
class Summarization(object):
"""Summarization algorithm
Traverses activation tree nodes in preorder.
Creates graph based on caller_id
"""
def __init__(self, preorder):
self.nid = 0
self.root = None
self.stack = []
self.nodes = []
self.matches = defaultdict(dict)
self.edges = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
)
self(preorder)
def graph(self, colors, width=0, height=0): # pylint: disable=too-many-locals
"""Generate JSON"""
min_duration = {}
max_duration = {}
edges = []
trials = set()
for node in self.nodes:
for trial_id, duration in viewitems(node.duration):
min_duration[trial_id] = min(
min_duration.get(trial_id, float('inf')), duration)
max_duration[trial_id] = max(
max_duration.get(trial_id, float('-inf')), duration)
trials.add(trial_id)
for source_nid, targets in viewitems(self.edges):
for target_nid, types in viewitems(targets):
for type_, count in viewitems(types):
edges.append({
'count': count,
'source': source_nid,
'target': target_nid,
'type': type_,
})
tlist = list(trials)
if not tlist:
tlist.append(0)
return {
'root': self.root,
'edges': edges,
'min_duration': min_duration,
'max_duration': max_duration,
'colors': colors,
'trial1': tlist[0],
'trial2': tlist[-1],
'width': width,
'height': height,
}
def merge(self, node, activation):
"""Abstract: Merge activation into node"""
raise NotImplementedError("merge is not implemented")
def calculate_match(self, node):
"""Abstract: Calculate match. Return tuple"""
raise NotImplementedError("calculate_match is not implemented")
def add_edge(self, source, target, type_, count=1):
"""Add edge"""
ids = target.trial_ids
trial_id = 0 if len(ids) > 1 else next(iter(ids))
self.edges[source.index][target.index][type_][trial_id] += count
def insert_node(self, activation, parent, match=None):
"""Create node for activation
Arguments:
activation -- activation element
parent -- previously created parent node
match -- matching key
"""
node = Node(
index=self.nid,
parent_index=-1,
name=activation.name,
caller_id=activation.caller_id or 0,
children=[],
activations=defaultdict(list),
duration=defaultdict(int),
full_tooltip=False,
tooltip=defaultdict(str),
children_index=-1,
trial_ids=[],
has_return=False,
)
self.merge(node, activation)
self.nid += 1
if parent is not None:
node.parent_index = parent.index
node.children_index = len(parent.children)
parent.children.append(node)
if match is not None:
self.matches[parent.index][match] = node
self.nodes.append(node)
return node
def insert_first(self, call):
"""Insert first node
Insert node and create initial edge
"""
self.root = node = self.insert_node(call, None)
self.add_edge(node, node, 'initial')
return node
def insert_call(self, call, last):
"""Insert call
Insert node, create match, and create call edge
"""
self.stack.append(last)
match = self.calculate_match(call)
node = self.matches[last.index].get(match)
if node is None:
node = self.insert_node(call, last, match)
self.add_edge(last, node, 'call')
else:
self.merge(node, call)
node.caller_id = max(node.caller_id, call.caller_id)
return node
def insert_return(self, last):
"""Insert return
Create return edge
"""
temp = self.stack.pop()
if not last.has_return:
self.add_edge(last, temp, 'return')
last.has_return = True
return temp
def insert_sequence(self, call, last):
"""Insert sequence
Check if match exists in last.parent. Create node if it doesn't.
Insert sequence edge from last to call
"""
match = self.calculate_match(call)
node = self.matches[last.parent_index].get(match)
if node is None:
node = self.insert_node(call, self.nodes[last.parent_index], match)
else:
self.merge(node, call)
node.caller_id = max(node.caller_id, call.caller_id)
self.add_edge(last, node, 'sequence')
return node
def __call__(self, preorder):
for call in preorder:
if not call.caller_id:
last = self.insert_first(call)
continue
if call.caller_id > last.caller_id:
last = self.insert_call(call, last)
continue
while call.caller_id < last.caller_id:
last = self.insert_return(last)
if call.caller_id == last.caller_id:
last = self.insert_sequence(call, last)
while self.stack:
last = self.insert_return(last)
return self
class LineNameSummarization(Summarization):
"""Summarize Activations by line and name"""
# ToDo: Diff equivalent
def merge(self, node, activation):
"""Extract id from activation and insert into idlist"""
trial_id = activation.trial_id
if trial_id not in node.trial_ids:
node.trial_ids.append(trial_id)
node.activations[trial_id].append(activation.id)
node.duration[trial_id] += activation.duration
node.tooltip[trial_id] += "T{} - {}<br>Line {}<br>".format(
trial_id, activation.id, activation.line
)
def calculate_match(self, node):
"""Calculate match. Use line and name"""
return (node.line, node.name)
class NoMatchSummarization(LineNameSummarization):
"""Create repr for all nodes. Does not summarize tree"""
# ToDo: Diff equivalent
def __init__(self, preorder):
self.match_id = 0
super(NoMatchSummarization, self).__init__(preorder)
def calculate_match(self, node):
"""No match"""
self.match_id += 1
return self.match_id
def insert_node(self, activation, parent, match=None):
"""Insert node. Create base repr"""
node = super(NoMatchSummarization, self).insert_node(
activation, parent, match
)
node.repr = '{0.line}-{0.name}'.format(activation)
return node
def insert_call(self, call, last):
"""Insert call.
Add opening parenthesis to caller"""
last.repr += "("
return super(NoMatchSummarization, self).insert_call(call, last)
def insert_return(self, last):
"""Insert return.
Add last activation to caller and close parenthesis"""
parent = super(NoMatchSummarization, self).insert_return(last)
parent.repr += last.repr + ")"
return parent
def insert_sequence(self, call, last):
"""Inser last caller and comma to caller"""
if not last.children:
self.nodes[last.parent_index].repr += last.repr + ","
return super(NoMatchSummarization, self).insert_sequence(call, last)
class StructureSummarization(Summarization):
"""Summarize by substructure"""
def merge(self, node, activation):
"""Extract ids from activation node and insert into idlist"""
for trial_id in activation.trial_ids:
node.activations[trial_id].extend(activation.activations[trial_id])
node.duration[trial_id] += activation.duration[trial_id]
node.tooltip[trial_id] += activation.tooltip[trial_id] + "<br>"
if trial_id not in node.trial_ids:
node.trial_ids.append(trial_id)
def calculate_match(self, node):
"""Match by repr"""
return (node.repr,)
def __call__(self, preorder):
return super(StructureSummarization, self).__call__(
NoMatchSummarization(preorder).nodes
)
class TreeSummarization(NoMatchSummarization):
"""Build tree"""
def __call__(self, preorder):
result = super(TreeSummarization, self).__call__(preorder)
self.edges.clear()
stack = [self.root]
while stack:
current = stack.pop()
for index, child in enumerate(current.children):
self.add_edge(current, child, 'call', index)
stack.append(child)
return result
cache = prepare_cache( # pylint: disable=invalid-name
lambda self, *args, **kwargs: "trial {}".format(self.trial.id))
class TrialGraph(Graph):
"""Trial Graph Class
Present trial graph on Jupyter"""
def __init__(self, trial):
self.trial = weakref.proxy(trial)
self.use_cache = True
self.width = 500
self.height = 500
self.mode = 3
self._modes = {
0: self.tree,
1: self.no_match,
2: self.exact_match,
3: self.namespace_match
}
def result(self, summarization):
"""Get summarization graph result"""
return self.trial.finished, summarization.graph(
{self.trial.id: 0}, self.width, self.height
), summarization.nodes
@cache("tree")
def tree(self):
"""Convert tree structure into dict tree structure"""
return self.result(TreeSummarization(self.trial.activations))
@cache("no_match")
def no_match(self):
"""Convert tree structure into dict graph without node matchings"""
return self.result(NoMatchSummarization(self.trial.activations))
@cache("exact_match")
def exact_match(self):
"""Convert tree structure into dict graph and match equal calls"""
return self.result(StructureSummarization(self.trial.activations))
@cache("namespace_match")
def namespace_match(self):
"""Convert tree structure into dict graph and match namespaces"""
return self.result(LineNameSummarization(self.trial.activations))
def _ipython_display_(self):
from IPython.display import display
bundle = {
'application/noworkflow.trial+json': self._modes[self.mode]()[1],
'text/plain': 'Trial {}'.format(self.trial.id),
}
display(bundle, raw=True)
| mit | -3,480,296,621,707,613,000 | 31.459384 | 111 | 0.57853 | false |
iModels/ffci | github/Team.py | 1 | 10674 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 martinqt <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.PaginatedList
import github.Repository
import github.NamedUser
class Team(github.GithubObject.CompletableGithubObject):
"""
This class represents Teams. The reference can be found here http://developer.github.com/v3/orgs/teams/
"""
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def members_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._members_count)
return self._members_count.value
@property
def members_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._members_url)
return self._members_url.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def permission(self):
"""
:type: string
"""
self._completeIfNotSet(self._permission)
return self._permission.value
@property
def repos_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._repos_count)
return self._repos_count.value
@property
def repositories_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repositories_url)
return self._repositories_url.value
@property
def slug(self):
"""
:type: string
"""
self._completeIfNotSet(self._slug)
return self._slug.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def add_to_members(self, member):
"""
:calls: `PUT /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/members/" + member._identity
)
def add_membership(self, member):
"""
:calls: `PUT /teams/:id/memberships/:user <http://developer.github.com/v3/orgs/teams>`_
        :param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
        headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/memberships/" + member._identity
)
def add_to_repos(self, repo):
"""
:calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(repo, github.Repository.Repository), repo
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/repos/" + repo._identity
)
def delete(self):
"""
:calls: `DELETE /teams/:id <http://developer.github.com/v3/orgs/teams>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, name, permission=github.GithubObject.NotSet):
"""
:calls: `PATCH /teams/:id <http://developer.github.com/v3/orgs/teams>`_
:param name: string
:param permission: string
:rtype: None
"""
assert isinstance(name, str), name
assert permission is github.GithubObject.NotSet or isinstance(permission, str), permission
post_parameters = {
"name": name,
}
if permission is not github.GithubObject.NotSet:
post_parameters["permission"] = permission
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def get_members(self):
"""
:calls: `GET /teams/:id/members <http://developer.github.com/v3/orgs/teams>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/members",
None
)
def get_repos(self):
"""
:calls: `GET /teams/:id/repos <http://developer.github.com/v3/orgs/teams>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
self.url + "/repos",
None
)
def has_in_members(self, member):
"""
:calls: `GET /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(member, github.NamedUser.NamedUser), member
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/members/" + member._identity
)
return status == 204
def has_in_repos(self, repo):
"""
:calls: `GET /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(repo, github.Repository.Repository), repo
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/repos/" + repo._identity
)
return status == 204
def remove_from_members(self, member):
"""
:calls: `DELETE /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/members/" + member._identity
)
def remove_from_repos(self, repo):
"""
:calls: `DELETE /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(repo, github.Repository.Repository), repo
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/repos/" + repo._identity
)
@property
def _identity(self):
return self.id
def _initAttributes(self):
self._id = github.GithubObject.NotSet
self._members_count = github.GithubObject.NotSet
self._members_url = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._permission = github.GithubObject.NotSet
self._repos_count = github.GithubObject.NotSet
self._repositories_url = github.GithubObject.NotSet
self._slug = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "members_count" in attributes: # pragma no branch
self._members_count = self._makeIntAttribute(attributes["members_count"])
if "members_url" in attributes: # pragma no branch
self._members_url = self._makeStringAttribute(attributes["members_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "permission" in attributes: # pragma no branch
self._permission = self._makeStringAttribute(attributes["permission"])
if "repos_count" in attributes: # pragma no branch
self._repos_count = self._makeIntAttribute(attributes["repos_count"])
if "repositories_url" in attributes: # pragma no branch
self._repositories_url = self._makeStringAttribute(attributes["repositories_url"])
if "slug" in attributes: # pragma no branch
self._slug = self._makeStringAttribute(attributes["slug"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
| mit | 1,098,415,611,471,718,000 | 36.321678 | 107 | 0.550684 | false |
Filechaser/nzbToMedia | libs/munkres.py | 7 | 24745 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Documentation is intended to be processed by Epydoc.
"""
Introduction
============
The Munkres module provides an implementation of the Munkres algorithm
(also called the Hungarian algorithm or the Kuhn-Munkres algorithm),
useful for solving the Assignment Problem.
Assignment Problem
==================
Let *C* be an *n*\ x\ *n* matrix representing the costs of each of *n* workers
to perform any of *n* jobs. The assignment problem is to assign jobs to
workers in a way that minimizes the total cost. Since each worker can perform
only one job and each job can be assigned to only one worker the assignments
represent an independent set of the matrix *C*.
One way to generate the optimal set is to create all permutations of
the indexes necessary to traverse the matrix so that no row and column
are used more than once. For instance, given this matrix (expressed in
Python)::
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
You could use this code to generate the traversal indexes::
def permute(a, results):
if len(a) == 1:
results.insert(len(results), a)
else:
for i in range(0, len(a)):
element = a[i]
a_copy = [a[j] for j in range(0, len(a)) if j != i]
subresults = []
permute(a_copy, subresults)
for subresult in subresults:
result = [element] + subresult
results.insert(len(results), result)
results = []
permute(range(len(matrix)), results) # [0, 1, 2] for a 3x3 matrix
After the call to permute(), the results matrix would look like this::
[[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 0, 1],
[2, 1, 0]]
You could then use that index matrix to loop over the original cost matrix
and calculate the smallest cost of the combinations::
    n = len(matrix)
    minval = sys.maxsize
    for indexes in results:
        cost = 0
        for row, col in enumerate(indexes):
            cost += matrix[row][col]
        minval = min(cost, minval)
    print minval
While this approach works fine for small matrices, it does not scale. It
executes in O(*n*!) time: Calculating the permutations for an *n*\ x\ *n*
matrix requires *n*! operations. For a 12x12 matrix, that's 479,001,600
traversals. Even if you could manage to perform each traversal in just one
millisecond, it would still take more than 133 hours to perform the entire
traversal. A 20x20 matrix would take 2,432,902,008,176,640,000 operations. At
an optimistic millisecond per operation, that's more than 77 million years.
The Munkres algorithm runs in O(*n*\ ^3) time, rather than O(*n*!). This
package provides an implementation of that algorithm.
This version is based on
http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html.
This version was written for Python by Brian Clapper from the (Ada) algorithm
at the above web site. (The ``Algorithm::Munkres`` Perl version, in CPAN, was
clearly adapted from the same web site.)
Usage
=====
Construct a Munkres object::
from munkres import Munkres
m = Munkres()
Then use it to compute the lowest cost assignment from a cost matrix. Here's
a sample program::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
m = Munkres()
indexes = m.compute(matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total cost: %d' % total
Running that program produces::
Lowest cost through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 0) -> 5
(1, 1) -> 3
(2, 2) -> 4
total cost=12
The instantiated Munkres object can be used multiple times on different
matrices.
Non-square Cost Matrices
========================
The Munkres algorithm assumes that the cost matrix is square. However, it's
possible to use a rectangular matrix if you first pad it with 0 values to make
it square. This module automatically pads rectangular cost matrices to make
them square.
Notes:
- The module operates on a *copy* of the caller's matrix, so any padding will
not be seen by the caller.
- The cost matrix must be rectangular or square. An irregular matrix will
*not* work.
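For example (a minimal sketch; the matrix values below are made up purely
for illustration), a 2x3 cost matrix is padded to 3x3 internally, but the
returned ``(row, column)`` pairs always refer to the original matrix::
    from munkres import Munkres
    matrix = [[5, 9, 1],
              [10, 3, 2]]   # 2 rows, 3 columns
    m = Munkres()
    indexes = m.compute(matrix)   # e.g. [(0, 2), (1, 1)]; the padded row is dropped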
Calculating Profit, Rather than Cost
====================================
The cost matrix is just that: A cost matrix. The Munkres algorithm finds
the combination of elements (one from each row and column) that results in
the smallest cost. It's also possible to use the algorithm to maximize
profit. To do that, however, you have to convert your profit matrix to a
cost matrix. The simplest way to do that is to subtract all elements from a
large value. For example::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = []
for row in matrix:
cost_row = []
for col in row:
cost_row += [sys.maxsize - col]
cost_matrix += [cost_row]
m = Munkres()
indexes = m.compute(cost_matrix)
print_matrix(matrix, msg='Highest profit through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total profit=%d' % total
Running that program produces::
Highest profit through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 1) -> 9
(1, 0) -> 10
(2, 2) -> 4
total profit=23
The ``munkres`` module provides a convenience method for creating a cost
matrix from a profit matrix. Since it doesn't know whether the matrix contains
floating point numbers, decimals, or integers, you have to provide the
conversion function; but the convenience method takes care of the actual
creation of the cost matrix::
import munkres
cost_matrix = munkres.make_cost_matrix(matrix,
lambda cost: sys.maxsize - cost)
So, the above profit-calculation program can be recast as::
from munkres import Munkres, print_matrix, make_cost_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = make_cost_matrix(matrix, lambda cost: sys.maxsize - cost)
m = Munkres()
indexes = m.compute(cost_matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total profit=%d' % total
References
==========
1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
*Naval Research Logistics Quarterly*, 2:83-97, 1955.
3. Harold W. Kuhn. Variants of the Hungarian method for assignment
problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
4. Munkres, J. Algorithms for the Assignment and Transportation Problems.
*Journal of the Society of Industrial and Applied Mathematics*,
5(1):32-38, March, 1957.
5. http://en.wikipedia.org/wiki/Hungarian_algorithm
Copyright and License
=====================
This software is released under a BSD license, adapted from
<http://opensource.org/licenses/bsd-license.php>
Copyright (c) 2008 Brian M. Clapper
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name "clapper.org" nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
__docformat__ = 'restructuredtext'
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import sys
import copy
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['Munkres', 'make_cost_matrix']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# Info about the module
__version__ = "1.0.7"
__author__ = "Brian Clapper, [email protected]"
__url__ = "http://software.clapper.org/munkres/"
__copyright__ = "(c) 2008 Brian M. Clapper"
__license__ = "BSD-style license"
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class Munkres:
"""
Calculate the Munkres solution to the classical assignment problem.
See the module documentation for usage.
"""
def __init__(self):
"""Create a new instance"""
self.C = None
self.row_covered = []
self.col_covered = []
self.n = 0
self.Z0_r = 0
self.Z0_c = 0
self.marked = None
self.path = None
def make_cost_matrix(profit_matrix, inversion_function):
"""
**DEPRECATED**
Please use the module function ``make_cost_matrix()``.
"""
import munkres
return munkres.make_cost_matrix(profit_matrix, inversion_function)
make_cost_matrix = staticmethod(make_cost_matrix)
def pad_matrix(self, matrix, pad_value=0):
"""
Pad a possibly non-square matrix to make it square.
:Parameters:
matrix : list of lists
matrix to pad
pad_value : int
value to use to pad the matrix
:rtype: list of lists
:return: a new, possibly padded, matrix
"""
max_columns = 0
total_rows = len(matrix)
for row in matrix:
max_columns = max(max_columns, len(row))
total_rows = max(max_columns, total_rows)
new_matrix = []
for row in matrix:
row_len = len(row)
new_row = row[:]
if total_rows > row_len:
# Row too short. Pad it.
new_row += [pad_value] * (total_rows - row_len)
new_matrix += [new_row]
while len(new_matrix) < total_rows:
new_matrix += [[pad_value] * total_rows]
return new_matrix
def compute(self, cost_matrix):
"""
Compute the indexes for the lowest-cost pairings between rows and
columns in the database. Returns a list of (row, column) tuples
that can be used to traverse the matrix.
:Parameters:
cost_matrix : list of lists
The cost matrix. If this cost matrix is not square, it
will be padded with zeros, via a call to ``pad_matrix()``.
(This method does *not* modify the caller's matrix. It
operates on a copy of the matrix.)
**WARNING**: This code handles square and rectangular
matrices. It does *not* handle irregular matrices.
:rtype: list
:return: A list of ``(row, column)`` tuples that describe the lowest
cost path through the matrix
"""
self.C = self.pad_matrix(cost_matrix)
self.n = len(self.C)
self.original_length = len(cost_matrix)
self.original_width = len(cost_matrix[0])
self.row_covered = [False for i in range(self.n)]
self.col_covered = [False for i in range(self.n)]
self.Z0_r = 0
self.Z0_c = 0
self.path = self.__make_matrix(self.n * 2, 0)
self.marked = self.__make_matrix(self.n, 0)
done = False
step = 1
steps = { 1 : self.__step1,
2 : self.__step2,
3 : self.__step3,
4 : self.__step4,
5 : self.__step5,
6 : self.__step6 }
while not done:
try:
func = steps[step]
step = func()
except KeyError:
done = True
# Look for the starred columns
results = []
for i in range(self.original_length):
for j in range(self.original_width):
if self.marked[i][j] == 1:
results += [(i, j)]
return results
def __copy_matrix(self, matrix):
"""Return an exact copy of the supplied matrix"""
return copy.deepcopy(matrix)
def __make_matrix(self, n, val):
"""Create an *n*x*n* matrix, populating it with the specific value."""
matrix = []
for i in range(n):
matrix += [[val for j in range(n)]]
return matrix
def __step1(self):
"""
For each row of the matrix, find the smallest element and
subtract it from every element in its row. Go to Step 2.
"""
C = self.C
n = self.n
for i in range(n):
minval = min(self.C[i])
# Find the minimum value for this row and subtract that minimum
# from every element in the row.
for j in range(n):
self.C[i][j] -= minval
return 2
def __step2(self):
"""
Find a zero (Z) in the resulting matrix. If there is no starred
zero in its row or column, star Z. Repeat for each element in the
matrix. Go to Step 3.
"""
n = self.n
for i in range(n):
for j in range(n):
if (self.C[i][j] == 0) and \
(not self.col_covered[j]) and \
(not self.row_covered[i]):
self.marked[i][j] = 1
self.col_covered[j] = True
self.row_covered[i] = True
self.__clear_covers()
return 3
def __step3(self):
"""
Cover each column containing a starred zero. If K columns are
covered, the starred zeros describe a complete set of unique
assignments. In this case, Go to DONE, otherwise, Go to Step 4.
"""
n = self.n
count = 0
for i in range(n):
for j in range(n):
if self.marked[i][j] == 1:
self.col_covered[j] = True
count += 1
if count >= n:
step = 7 # done
else:
step = 4
return step
def __step4(self):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
step = 0
done = False
row = -1
col = -1
star_col = -1
while not done:
(row, col) = self.__find_a_zero()
if row < 0:
done = True
step = 6
else:
self.marked[row][col] = 2
star_col = self.__find_star_in_row(row)
if star_col >= 0:
col = star_col
self.row_covered[row] = True
self.col_covered[col] = False
else:
done = True
self.Z0_r = row
self.Z0_c = col
step = 5
return step
def __step5(self):
"""
Construct a series of alternating primed and starred zeros as
follows. Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred zero
of the series, star each primed zero of the series, erase all
primes and uncover every line in the matrix. Return to Step 3
"""
count = 0
path = self.path
path[count][0] = self.Z0_r
path[count][1] = self.Z0_c
done = False
while not done:
row = self.__find_star_in_col(path[count][1])
if row >= 0:
count += 1
path[count][0] = row
path[count][1] = path[count-1][1]
else:
done = True
if not done:
col = self.__find_prime_in_row(path[count][0])
count += 1
path[count][0] = path[count-1][0]
path[count][1] = col
self.__convert_path(path, count)
self.__clear_covers()
self.__erase_primes()
return 3
def __step6(self):
"""
Add the value found in Step 4 to every element of each covered
row, and subtract it from every element of each uncovered column.
Return to Step 4 without altering any stars, primes, or covered
lines.
"""
minval = self.__find_smallest()
for i in range(self.n):
for j in range(self.n):
if self.row_covered[i]:
self.C[i][j] += minval
if not self.col_covered[j]:
self.C[i][j] -= minval
return 4
def __find_smallest(self):
"""Find the smallest uncovered value in the matrix."""
minval = sys.maxsize
for i in range(self.n):
for j in range(self.n):
if (not self.row_covered[i]) and (not self.col_covered[j]):
if minval > self.C[i][j]:
minval = self.C[i][j]
return minval
def __find_a_zero(self):
"""Find the first uncovered element with value 0"""
row = -1
col = -1
i = 0
n = self.n
done = False
while not done:
j = 0
while True:
if (self.C[i][j] == 0) and \
(not self.row_covered[i]) and \
(not self.col_covered[j]):
row = i
col = j
done = True
j += 1
if j >= n:
break
i += 1
if i >= n:
done = True
return (row, col)
def __find_star_in_row(self, row):
"""
Find the first starred element in the specified row. Returns
the column index, or -1 if no starred element was found.
"""
col = -1
for j in range(self.n):
if self.marked[row][j] == 1:
col = j
break
return col
def __find_star_in_col(self, col):
"""
Find the first starred element in the specified row. Returns
the row index, or -1 if no starred element was found.
"""
row = -1
for i in range(self.n):
if self.marked[i][col] == 1:
row = i
break
return row
def __find_prime_in_row(self, row):
"""
Find the first prime element in the specified row. Returns
the column index, or -1 if no starred element was found.
"""
col = -1
for j in range(self.n):
if self.marked[row][j] == 2:
col = j
break
return col
def __convert_path(self, path, count):
for i in range(count+1):
if self.marked[path[i][0]][path[i][1]] == 1:
self.marked[path[i][0]][path[i][1]] = 0
else:
self.marked[path[i][0]][path[i][1]] = 1
def __clear_covers(self):
"""Clear all covered matrix cells"""
for i in range(self.n):
self.row_covered[i] = False
self.col_covered[i] = False
def __erase_primes(self):
"""Erase all prime markings"""
for i in range(self.n):
for j in range(self.n):
if self.marked[i][j] == 2:
self.marked[i][j] = 0
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def make_cost_matrix(profit_matrix, inversion_function):
"""
Create a cost matrix from a profit matrix by calling
'inversion_function' to invert each value. The inversion
function must take one numeric argument (of any type) and return
another numeric argument which is presumed to be the cost inverse
of the original profit.
This is a static method. Call it like this:
.. python::
cost_matrix = Munkres.make_cost_matrix(matrix, inversion_func)
For example:
.. python::
cost_matrix = Munkres.make_cost_matrix(matrix, lambda x : sys.maxsize - x)
:Parameters:
profit_matrix : list of lists
The matrix to convert from a profit to a cost matrix
inversion_function : function
The function to use to invert each entry in the profit matrix
:rtype: list of lists
:return: The converted matrix
"""
cost_matrix = []
for row in profit_matrix:
cost_matrix.append([inversion_function(value) for value in row])
return cost_matrix
def print_matrix(matrix, msg=None):
"""
Convenience function: Displays the contents of a matrix of integers.
:Parameters:
matrix : list of lists
Matrix to print
msg : str
Optional message to print before displaying the matrix
"""
import math
if msg is not None:
print(msg)
# Calculate the appropriate format width.
width = 0
for row in matrix:
for val in row:
width = max(width, int(math.log10(val)) + 1)
# Make the format string
format = '%%%dd' % width
# Print the matrix
for row in matrix:
sep = '['
for val in row:
sys.stdout.write(sep + format % val)
sep = ', '
sys.stdout.write(']\n')
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
if __name__ == '__main__':
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850), # expected cost
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452), # expected cost
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15)]
m = Munkres()
for cost_matrix, expected_total in matrices:
print_matrix(cost_matrix, msg='cost matrix')
indexes = m.compute(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r][c]
total_cost += x
print('(%d, %d) -> %d' % (r, c, x))
print('lowest cost=%d' % total_cost)
assert expected_total == total_cost
| gpl-3.0 | 377,111,066,161,064,300 | 30.482188 | 82 | 0.545686 | false |