# -*- coding: utf-8 -*-
r"""
Heegner points on elliptic curves over the rational numbers
AUTHORS:
- William Stein (August 2009) -- most of the initial version
- Robert Bradshaw (July 2009) -- an early version of some specific code
EXAMPLES::
sage: E = EllipticCurve('433a')
sage: P = E.heegner_point(-8,3)
sage: z = P.point_exact(201); z
(-4/3 : 1/9*a : 1)
sage: parent(z)
Abelian group of points on Elliptic Curve defined by y^2 + x*y = x^3 + 1 over Number Field in a with defining polynomial x^2 - 12*x + 111
sage: parent(z[0]).discriminant()
-3
sage: E.quadratic_twist(-3).rank()
1
sage: K.<a> = QuadraticField(-8)
sage: K.factor(3)
(Fractional ideal (1/2*a + 1)) * (Fractional ideal (-1/2*a + 1))
Next try an inert prime::
sage: K.factor(5)
Fractional ideal (5)
sage: P = E.heegner_point(-8,5)
sage: z = P.point_exact(300)
sage: z[0].charpoly().factor()
(x^6 + x^5 - 1/4*x^4 + 19/10*x^3 + 31/20*x^2 - 7/10*x + 49/100)^2
sage: z[1].charpoly().factor()
x^12 - x^11 + 6/5*x^10 - 33/40*x^9 - 89/320*x^8 + 3287/800*x^7 - 5273/1600*x^6 + 993/4000*x^5 + 823/320*x^4 - 2424/625*x^3 + 12059/12500*x^2 + 3329/25000*x + 123251/250000
sage: f = P.x_poly_exact(300); f
x^6 + x^5 - 1/4*x^4 + 19/10*x^3 + 31/20*x^2 - 7/10*x + 49/100
sage: f.discriminant().factor()
-1 * 2^-9 * 5^-9 * 7^2 * 281^2 * 1021^2
We find some Mordell-Weil generators in the rank 1 case using Heegner points::
sage: E = EllipticCurve('43a'); P = E.heegner_point(-7)
sage: P.x_poly_exact()
x
sage: P.point_exact()
(0 : 0 : 1)
sage: E = EllipticCurve('997a')
sage: E.rank()
1
sage: E.heegner_discriminants_list(10)
[-19, -23, -31, -35, -39, -40, -52, -55, -56, -59]
sage: P = E.heegner_point(-19)
sage: P.x_poly_exact()
x - 141/49
sage: P.point_exact()
(141/49 : -162/343 : 1)
Here we find that the Heegner point generates a subgroup of index 3::
sage: E = EllipticCurve('92b1')
sage: E.heegner_discriminants_list(1)
[-7]
sage: P = E.heegner_point(-7); z = P.point_exact(); z
(0 : 1 : 1)
sage: E.regulator()
0.0498083972980648
sage: z.height()
0.448275575682583
sage: P = E(1,1); P # a generator
(1 : 1 : 1)
sage: -3*P
(0 : 1 : 1)
sage: E.tamagawa_product()
3
The above is consistent with the following analytic computation::
sage: E.heegner_index(-7)
3.0000?
"""
# ****************************************************************************
# Copyright (C) 2005-2009 William Stein <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function, absolute_import, division
from six.moves import range
from sage.misc.all import verbose, prod
from sage.misc.cachefunc import cached_method
from sage.structure.sage_object import SageObject
from sage.structure.richcmp import (richcmp_method, richcmp,
richcmp_not_equal, rich_to_bool)
import sage.rings.number_field.number_field_element
import sage.rings.number_field.number_field as number_field
import sage.rings.all as rings
from sage.rings.all import (ZZ, GF, QQ, CDF,
Integers, RealField, ComplexField, QuadraticField)
from sage.arith.all import (gcd, xgcd, lcm, prime_divisors, factorial,
binomial)
from sage.rings.factorint import factor_trial_division
from sage.quadratic_forms.all import (BinaryQF,
BinaryQF_reduced_representatives)
from sage.matrix.all import MatrixSpace, matrix
from sage.modular.modsym.p1list import P1List
##################################################################################
#
# The exported functions, which are in most cases enough to get the
# user started working with Heegner points:
#
# heegner_points -- all of them with given level, discriminant, conductor
# heegner_point -- a specific one
#
##################################################################################
def heegner_points(N, D=None, c=None):
"""
Return all Heegner points of given level `N`. Can also restrict
to Heegner points with specified discriminant `D` and optionally
conductor `c`.
INPUT:
- `N` -- level (positive integer)
- `D` -- discriminant (negative integer)
- `c` -- conductor (positive integer)
EXAMPLES::
sage: heegner_points(389,-7)
Set of all Heegner points on X_0(389) associated to QQ[sqrt(-7)]
sage: heegner_points(389,-7,1)
All Heegner points of conductor 1 on X_0(389) associated to QQ[sqrt(-7)]
sage: heegner_points(389,-7,5)
All Heegner points of conductor 5 on X_0(389) associated to QQ[sqrt(-7)]
"""
if D is None and c is None:
return HeegnerPoints_level(N)
if D is not None and c is None:
return HeegnerPoints_level_disc(N, D)
if D is not None and c is not None:
return HeegnerPoints_level_disc_cond(N,D,c)
raise TypeError
def heegner_point(N, D=None, c=1):
"""
Return a specific Heegner point of level `N` with given
discriminant and conductor. If `D` is not specified, then the
first valid Heegner discriminant is used. If `c` is not given,
then `c=1` is used.
INPUT:
- `N` -- level (positive integer)
- `D` -- discriminant (optional: default first valid `D`)
- `c` -- conductor (positive integer, optional, default: 1)
EXAMPLES::
sage: heegner_point(389)
Heegner point 1/778*sqrt(-7) - 185/778 of discriminant -7 on X_0(389)
sage: heegner_point(389,-7)
Heegner point 1/778*sqrt(-7) - 185/778 of discriminant -7 on X_0(389)
sage: heegner_point(389,-7,5)
Heegner point 5/778*sqrt(-7) - 147/778 of discriminant -7 and conductor 5 on X_0(389)
sage: heegner_point(389,-20)
Heegner point 1/778*sqrt(-20) - 165/389 of discriminant -20 on X_0(389)
"""
if D is not None:
return heegner_points(N,D,c)[0]
H = heegner_points(N)
D = H.discriminants(1)[0]
return heegner_points(N,D,c)[0]
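# A minimal usage sketch of the two exported functions, using the same
# illustrative values as the doctests above (any (N, D, c) satisfying the
# Heegner hypothesis works the same way):
#
#   sage: heegner_points(389, -7)       # the whole family for QQ[sqrt(-7)]
#   Set of all Heegner points on X_0(389) associated to QQ[sqrt(-7)]
#   sage: heegner_point(389, -7, 5)     # a single point of conductor 5
#   Heegner point 5/778*sqrt(-7) - 147/778 of discriminant -7 and conductor 5 on X_0(389)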
##################################################################################
#
# Ring class fields, represented as abstract objects. These do not
# derive from number fields, since we do not need to work with their
# elements, and explicitly representing them as number fields would be
# far too difficult.
#
##################################################################################
class RingClassField(SageObject):
"""
A ring class field of a quadratic imaginary field of given conductor.
.. NOTE::
This is a *ring* class field, not a ray class field. In
general, the ring class field of given conductor is a subfield
of the ray class field of the same conductor.
EXAMPLES::
sage: heegner_point(37,-7).ring_class_field()
Hilbert class field of QQ[sqrt(-7)]
sage: heegner_point(37,-7,5).ring_class_field()
Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: heegner_point(37,-7,55).ring_class_field()
Ring class field extension of QQ[sqrt(-7)] of conductor 55
TESTS::
sage: K_c = heegner_point(37,-7).ring_class_field()
sage: type(K_c)
<class 'sage.schemes.elliptic_curves.heegner.RingClassField'>
sage: loads(dumps(K_c)) == K_c
True
"""
def __init__(self, D, c, check=True):
"""
INPUT:
- `D` -- discriminant of quadratic imaginary field
- `c` -- conductor (positive integer coprime to `D`)
- ``check`` -- bool (default: ``True``); whether to check
validity of input
EXAMPLES::
sage: sage.schemes.elliptic_curves.heegner.RingClassField(-7,5, False)
Ring class field extension of QQ[sqrt(-7)] of conductor 5
"""
if check:
D = ZZ(D); c = ZZ(c)
self.__D = D
self.__c = c
def __eq__(self, other):
"""
Used for equality testing.
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: K5 = E.heegner_point(-7,5).ring_class_field()
sage: K11 = E.heegner_point(-7,11).ring_class_field()
sage: K5 == K11
False
sage: K5 == K5
True
sage: K11 == 11
False
"""
return isinstance(other, RingClassField) and self.__D == other.__D and self.__c == other.__c
def __ne__(self, other):
"""
Check whether ``self`` is not equal to ``other``.
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: K5 = E.heegner_point(-7,5).ring_class_field()
sage: K11 = E.heegner_point(-7,11).ring_class_field()
sage: K5 != K11
True
sage: K5 != K5
False
sage: K11 != 11
True
"""
return not (self == other)
def __hash__(self):
"""
Used for computing hash of ``self``.
.. NOTE::
The hash is equal to the hash of the pair
``(discriminant, conductor)``.
EXAMPLES::
sage: E = EllipticCurve('389a'); K5 = E.heegner_point(-7,5).ring_class_field()
sage: hash(K5) == hash((-7,5))
True
"""
return hash((self.__D, self.__c))
def conductor(self):
"""
Return the conductor of this ring class field.
EXAMPLES::
sage: E = EllipticCurve('389a'); K5 = E.heegner_point(-7,5).ring_class_field()
sage: K5.conductor()
5
"""
return self.__c
def discriminant_of_K(self):
"""
Return the discriminant of the quadratic imaginary field `K` contained in ``self``.
EXAMPLES::
sage: E = EllipticCurve('389a'); K5 = E.heegner_point(-7,5).ring_class_field()
sage: K5.discriminant_of_K()
-7
"""
return self.__D
@cached_method
def ramified_primes(self):
r"""
Return the primes of `\ZZ` that ramify in this ring class field.
EXAMPLES::
sage: E = EllipticCurve('389a'); K55 = E.heegner_point(-7,55).ring_class_field()
sage: K55.ramified_primes()
[5, 7, 11]
sage: E.heegner_point(-7).ring_class_field().ramified_primes()
[7]
"""
return prime_divisors(self.__D * self.__c)
def _repr_(self):
"""
EXAMPLES::
sage: heegner_point(37,-7,55).ring_class_field()._repr_()
'Ring class field extension of QQ[sqrt(-7)] of conductor 55'
sage: heegner_point(37,-7).ring_class_field()._repr_()
'Hilbert class field of QQ[sqrt(-7)]'
"""
c = self.__c
if c == 1:
return "Hilbert class field of QQ[sqrt(%s)]"%self.__D
else:
return "Ring class field extension of QQ[sqrt(%s)] of conductor %s"%(self.__D, self.__c)
@cached_method
def degree_over_K(self):
"""
Return the relative degree of this ring class field over the
quadratic imaginary field `K`.
EXAMPLES::
sage: E = EllipticCurve('389a'); P = E.heegner_point(-7,5)
sage: K5 = P.ring_class_field(); K5
Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: K5.degree_over_K()
6
sage: type(K5.degree_over_K())
<... 'sage.rings.integer.Integer'>
sage: E = EllipticCurve('389a'); E.heegner_point(-20).ring_class_field().degree_over_K()
2
sage: E.heegner_point(-20,3).ring_class_field().degree_over_K()
4
sage: kronecker(-20,11)
-1
sage: E.heegner_point(-20,11).ring_class_field().degree_over_K()
24
"""
K = self.quadratic_field()
# [K_c : K] = [H : K] * [K_c : H], i.e. the class number times the degree over the Hilbert class field H.
return K.class_number() * self.degree_over_H()
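# Worked instance of the formula above, taken from the doctests: for
# D = -20 the class number is 2, so [K_c : K] = 2 * [K_c : H], giving
# 2*1 = 2 for c = 1, 2*2 = 4 for the split prime c = 3, and 2*12 = 24
# for the inert prime c = 11.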
@cached_method
def degree_over_H(self):
"""
Return the degree of this field over the Hilbert class field `H` of `K`.
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: E.heegner_point(-59).ring_class_field().degree_over_H()
1
sage: E.heegner_point(-59).ring_class_field().degree_over_K()
3
sage: QuadraticField(-59,'a').class_number()
3
Some examples in which the prime dividing `c` is inert::
sage: heegner_point(37,-7,3).ring_class_field().degree_over_H()
4
sage: heegner_point(37,-7,3^2).ring_class_field().degree_over_H()
12
sage: heegner_point(37,-7,3^3).ring_class_field().degree_over_H()
36
Next, the prime dividing `c` is split. For example, in the first case
`O_K/cO_K` is isomorphic to a direct sum of two copies of
``GF(2)``, so the units are trivial::
sage: heegner_point(37,-7,2).ring_class_field().degree_over_H()
1
sage: heegner_point(37,-7,4).ring_class_field().degree_over_H()
2
sage: heegner_point(37,-7,8).ring_class_field().degree_over_H()
4
Now `c` is ramified::
sage: heegner_point(37,-7,7).ring_class_field().degree_over_H()
7
sage: heegner_point(37,-7,7^2).ring_class_field().degree_over_H()
49
Check that :trac:`15218` is solved::
sage: E = EllipticCurve("19a");
sage: s = E.heegner_point(-3,2).ring_class_field().galois_group().complex_conjugation()
sage: H = s.domain(); H.absolute_degree()
2
"""
c = self.__c
if c == 1:
return ZZ(1)
# Let K_c be the ring class field. We have by class field theory that
# Gal(K_c / H) = (O_K / c O_K)^* / ((Z/cZ)^* M),
# where M is the image of the roots of unity of K in (O_K / c O_K)^*.
#
# To compute the cardinality of the above Galois group, we
# first reduce to the case that c = p^e is a prime power
# (since the expression is multiplicative in c).
# Of course, note also that #(Z/cZ)^* = phi(c)
#
# Case 1: p splits in O_K. Then
# #(O_K/p^e*O_K)^* = (#(Z/p^eZ)^*)^2 = phi(p^e)^2, so
# #(O_K/p^e*O_K)^*/(Z/p^eZ)^* = phi(p^e) = p^e - p^(e-1)
#
# Case 2: p is inert in O_K. Then
# #(O_K/p^e O_K)^* = p^(2*e)-p^(2*(e-1))
# so #(O_K/p^e*O_K)^*/(Z/p^eZ)^*
# = (p^(2*e)-p^(2*(e-1)))/(p^e-p^(e-1)) = p^e + p^(e-1).
#
# Case 3: p ramified in O_K. Then
# #(O_K/p^e O_K)^* = p^(2*e) - p^(2*e-1),
# so #(O_K/p^e O_K)^*/#(Z/p^eZ)^* = p^e.
#
# Section 4.2 of Cohen's "Advanced Computational Algebraic
# Number Theory" GTM is also relevant, though Cohen is working
# with *ray* class fields and here we want the cardinality
# of the *ring* class field, which is a subfield.
K = self.quadratic_field()
n = ZZ(1)
for p, e in c.factor():
F = K.factor(p)
if len(F) == 2:
# split case
n *= p**e - p**(e-1)
else:
if F[0][1] > 1:
# ramified case
n *= p**e
else:
# inert case
n *= p**e + p**(e-1)
return (n * ZZ(2)) // K.number_of_roots_of_unity()
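# Worked check of the three cases against the doctests above, for
# K = QQ[sqrt(-7)] (which has only the two roots of unity +-1, so the
# final multiply-by-2-and-divide step is the identity):
#   c = 2 split:    n = 2^1 - 2^0 = 1,  degree = 1
#   c = 3 inert:    n = 3^1 + 3^0 = 4,  degree = 4
#   c = 7 ramified: n = 7^1 = 7,        degree = 7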
@cached_method
def absolute_degree(self):
r"""
Return the absolute degree of this field over `\QQ`.
EXAMPLES::
sage: E = EllipticCurve('389a'); K = E.heegner_point(-7,5).ring_class_field()
sage: K.absolute_degree()
12
sage: K.degree_over_K()
6
"""
return 2*self.degree_over_K()
degree_over_Q = absolute_degree
@cached_method
def quadratic_field(self):
r"""
Return the quadratic imaginary field `K = \QQ(\sqrt{D})`.
EXAMPLES::
sage: E = EllipticCurve('389a'); K = E.heegner_point(-7,5).ring_class_field()
sage: K.quadratic_field()
Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
"""
D = self.__D
var = 'sqrt_minus_%s'%(-D)
return number_field.QuadraticField(D,var)
@cached_method
def galois_group(self, base=QQ):
r"""
Return the Galois group of ``self`` over base.
INPUT:
- ``base`` -- (default: `\QQ`) a subfield of ``self`` or `\QQ`
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: A = E.heegner_point(-7,5).ring_class_field()
sage: A.galois_group()
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: B = E.heegner_point(-7).ring_class_field()
sage: C = E.heegner_point(-7,15).ring_class_field()
sage: A.galois_group()
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: A.galois_group(B)
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5 over Hilbert class field of QQ[sqrt(-7)]
sage: A.galois_group().cardinality()
12
sage: A.galois_group(B).cardinality()
6
sage: C.galois_group(A)
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 15 over Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: C.galois_group(A).cardinality()
4
"""
return GaloisGroup(self, base)
def is_subfield(self, M):
"""
Return ``True`` if this ring class field is a subfield of the ring class field `M`.
If `M` is not a ring class field, then a TypeError is raised.
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: A = E.heegner_point(-7,5).ring_class_field()
sage: B = E.heegner_point(-7).ring_class_field()
sage: C = E.heegner_point(-20).ring_class_field()
sage: D = E.heegner_point(-7,15).ring_class_field()
sage: B.is_subfield(A)
True
sage: B.is_subfield(B)
True
sage: B.is_subfield(D)
True
sage: B.is_subfield(C)
False
sage: A.is_subfield(B)
False
sage: A.is_subfield(D)
True
"""
if not isinstance(M, RingClassField):
raise TypeError("M must be a ring class field")
return self.quadratic_field() == M.quadratic_field() and \
M.conductor() % self.conductor() == 0
##################################################################################
#
# Galois groups of ring class fields
#
##################################################################################
class GaloisGroup(SageObject):
"""
A Galois group of a ring class field.
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: G = E.heegner_point(-7,5).ring_class_field().galois_group(); G
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: G.field()
Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: G.cardinality()
12
sage: G.complex_conjugation()
Complex conjugation automorphism of Ring class field extension of QQ[sqrt(-7)] of conductor 5
TESTS::
sage: G = heegner_point(37,-7).ring_class_field().galois_group()
sage: loads(dumps(G)) == G
True
sage: type(G)
<class 'sage.schemes.elliptic_curves.heegner.GaloisGroup'>
"""
def __init__(self, field, base=QQ):
r"""
INPUT:
- ``field`` -- a ring class field
- ``base`` -- subfield of field (default: `\QQ`)
EXAMPLES::
sage: K5 = heegner_points(389,-7,5).ring_class_field()
sage: K1 = heegner_points(389,-7,1).ring_class_field()
sage: sage.schemes.elliptic_curves.heegner.GaloisGroup(K5,K1)
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5 over Hilbert class field of QQ[sqrt(-7)]
sage: K5.galois_group(K1)
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5 over Hilbert class field of QQ[sqrt(-7)]
"""
if not isinstance(field, RingClassField):
raise TypeError("field must be of type RingClassField")
if base != QQ and base != field.quadratic_field():
if not isinstance(base, RingClassField):
raise TypeError("base must be of type RingClassField or QQ or quadratic field")
if not base.is_subfield(field):
raise TypeError("base must be a subfield of field")
self.__field = field
self.__base = base
def __eq__(self, G):
"""
EXAMPLES::
sage: G = EllipticCurve('389a').heegner_point(-7,5).ring_class_field().galois_group()
sage: G == G
True
sage: G == 0
False
sage: H = EllipticCurve('389a').heegner_point(-7,11).ring_class_field().galois_group()
sage: G == H
False
"""
return isinstance(G, GaloisGroup) and (G.__field,G.__base) == (self.__field,self.__base)
def __ne__(self, other):
"""
EXAMPLES::
sage: G = EllipticCurve('389a').heegner_point(-7,5).ring_class_field().galois_group()
sage: G != G
False
sage: G != 0
True
sage: H = EllipticCurve('389a').heegner_point(-7,11).ring_class_field().galois_group()
sage: G != H
True
"""
return not (self == other)
def __hash__(self):
"""
Return the hash of this Galois group, which is the same as the
hash of the pair ``(self.field(), self.base_field())``.
EXAMPLES::
sage: G = EllipticCurve('389a').heegner_point(-7,5).ring_class_field().galois_group()
sage: hash(G) == hash((G.field(), G.base_field()))
True
"""
return hash((self.__field, self.__base))
def __call__(self, x):
"""
Coerce `x` into ``self``: `x` may be a Galois group element, or,
when the base field of ``self`` is the Hilbert class field, `x`
may also be an element of the ring of integers.
INPUT:
- `x` -- automorphism or quadratic field element
OUTPUT:
- automorphism (or TypeError)
EXAMPLES::
sage: K5 = heegner_points(389,-52,5).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K5.galois_group(K1)
sage: G(1)
Class field automorphism defined by x^2 + 325*y^2
sage: G(G[0])
Class field automorphism defined by x^2 + 325*y^2
sage: alpha = 2 + K1.quadratic_field().gen(); alpha
sqrt_minus_52 + 2
sage: G(alpha)
Class field automorphism defined by 14*x^2 - 10*x*y + 25*y^2
A TypeError is raised when the coercion is not possible::
sage: G(0)
Traceback (most recent call last):
...
TypeError: x does not define element of (O_K/c*O_K)^*
"""
if isinstance(x, GaloisAutomorphism) and x.parent() == self:
return x
try:
return self._alpha_to_automorphism(x)
except (ZeroDivisionError, TypeError):
raise TypeError("x does not define element of (O_K/c*O_K)^*")
def _repr_(self):
"""
Return string representation of this Galois group.
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: G = E.heegner_point(-7,5).ring_class_field().galois_group()
sage: G._repr_()
'Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5'
"""
if self.base_field() != QQ:
s = " over %s"%self.base_field()
else:
s = ''
return "Galois group of %s%s"%(self.field(), s)
def field(self):
"""
Return the ring class field that this Galois group acts on.
EXAMPLES::
sage: G = heegner_point(389,-7,5).ring_class_field().galois_group()
sage: G.field()
Ring class field extension of QQ[sqrt(-7)] of conductor 5
"""
return self.__field
def base_field(self):
"""
Return the base field, which is the field fixed by all the
automorphisms in this Galois group.
EXAMPLES::
sage: x = heegner_point(37,-7,5)
sage: Kc = x.ring_class_field(); Kc
Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: K = x.quadratic_field()
sage: G = Kc.galois_group(); G
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: G.base_field()
Rational Field
sage: G.cardinality()
12
sage: Kc.absolute_degree()
12
sage: G = Kc.galois_group(K); G
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5 over Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
sage: G.cardinality()
6
sage: G.base_field()
Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
sage: G = Kc.galois_group(Kc); G
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5 over Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: G.cardinality()
1
sage: G.base_field()
Ring class field extension of QQ[sqrt(-7)] of conductor 5
"""
return self.__base
@cached_method
def kolyvagin_generators(self):
r"""
Assuming this Galois group `G` is of the form
`G=\textrm{Gal}(K_c/K_1)`, with `c=p_1\dots p_n` satisfying the
Kolyvagin hypothesis, this function returns noncanonical
choices of lifts of generators for each of the cyclic factors
of `G` corresponding to the primes dividing `c`. Thus the
`i`-th returned value is an element of `G` that maps to the
identity element of `\textrm{Gal}(K_p/K_1)` for all `p \neq p_i` and
to a choice of generator of `\textrm{Gal}(K_{p_i}/K_1)`.
OUTPUT:
- list of elements of ``self``
EXAMPLES::
sage: K3 = heegner_points(389,-52,3).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K3.galois_group(K1)
sage: G.kolyvagin_generators()
(Class field automorphism defined by 9*x^2 - 6*x*y + 14*y^2,)
sage: K5 = heegner_points(389,-52,5).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K5.galois_group(K1)
sage: G.kolyvagin_generators()
(Class field automorphism defined by 17*x^2 - 14*x*y + 22*y^2,)
"""
M = self.field()
c = M.conductor()
if not (self._base_is_hilbert_class_field() and self.is_kolyvagin()):
raise ValueError("field must be of the form Gal(K_c/K_1)")
if not c.is_prime():
raise NotImplementedError("only implemented when c is prime")
# Since c satisfies Kolyvagin and is prime, the group is cyclic,
# so we just find a generator.
for sigma in self:
if sigma.order() == self.cardinality():
return tuple([sigma])
raise NotImplementedError
@cached_method
def lift_of_hilbert_class_field_galois_group(self):
r"""
Assuming this Galois group `G` is of the form `G=\textrm{Gal}(K_c/K)`,
this function returns noncanonical choices of lifts of the
elements of the quotient group `\textrm{Gal}(K_1/K)`.
OUTPUT:
- tuple of elements of self
EXAMPLES::
sage: K5 = heegner_points(389,-52,5).ring_class_field()
sage: G = K5.galois_group(K5.quadratic_field())
sage: G.lift_of_hilbert_class_field_galois_group()
(Class field automorphism defined by x^2 + 325*y^2, Class field automorphism defined by 2*x^2 + 2*x*y + 163*y^2)
sage: G.cardinality()
12
sage: K5.quadratic_field().class_number()
2
"""
if not self._base_is_quad_imag_field():
raise ValueError("Galois group must be of the form Gal(K_c/K)")
K = self.base_field()
C = K.class_group()
v = []
lifts = []
for sigma in self:
I = sigma.ideal()
g = C(I)
if g not in v:
v.append(g)
lifts.append(sigma)
return tuple(lifts)
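# Sketch of the idea: Gal(K_c/K) surjects onto Gal(K_1/K), which is the
# class group of K, so keeping the first automorphism hitting each ideal
# class yields one (noncanonical) lift per class -- hence the two lifts
# in the doctest above, where QQ[sqrt(-52)] has class number 2.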
@cached_method
def _list(self):
r"""
Enumerate the elements of ``self``.
EXAMPLES:
Example with order 1 (a special case)::
sage: E = EllipticCurve('389a'); F= E.heegner_point(-7,1).ring_class_field()
sage: G = F.galois_group(F.quadratic_field())
sage: G._list()
(Class field automorphism defined by x^2 + x*y + 2*y^2,)
Example over quadratic imaginary field::
sage: E = EllipticCurve('389a'); F= E.heegner_point(-7,5).ring_class_field()
sage: G = F.galois_group(F.quadratic_field())
sage: G._list()
(Class field automorphism defined by x^2 + x*y + 44*y^2, Class field automorphism defined by 2*x^2 - x*y + 22*y^2, Class field automorphism defined by 2*x^2 + x*y + 22*y^2, Class field automorphism defined by 4*x^2 - x*y + 11*y^2, Class field automorphism defined by 4*x^2 + x*y + 11*y^2, Class field automorphism defined by 7*x^2 + 7*x*y + 8*y^2)
Example over `\QQ` (it is not implemented yet)::
sage: K3 = heegner_points(389,-52,3).ring_class_field()
sage: K3.galois_group()._list()
Traceback (most recent call last):
...
NotImplementedError: Galois group over QQ not yet implemented
Example over Hilbert class field::
sage: K3 = heegner_points(389,-52,3).ring_class_field(); K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K3.galois_group(K1)
sage: G._list()
(Class field automorphism defined by x^2 + 117*y^2, Class field automorphism defined by 9*x^2 - 6*x*y + 14*y^2, Class field automorphism defined by 9*x^2 + 13*y^2, Class field automorphism defined by 9*x^2 + 6*x*y + 14*y^2)
"""
if self._base_is_QQ():
raise NotImplementedError("Galois group over QQ not yet implemented")
elif self._base_is_quad_imag_field():
# Over the quadratic imaginary field, so straightforward
# enumeration of all reduced primitive binary quadratic
# forms of discriminant D*c^2.
D = self.base_field().discriminant()
c = self.field().conductor()
Q = [f for f in BinaryQF_reduced_representatives(D*c*c) if f.is_primitive()]
v = [GaloisAutomorphismQuadraticForm(self, f) for f in Q]
elif self._base_is_hilbert_class_field() and self.is_kolyvagin():
# Take only the automorphisms in the quad imag case that map to
# a principal ideal.
M = self.field()
K = M.quadratic_field()
v = []
self.__p1_to_automorphism = {}
for sigma in M.galois_group(K)._list():
I = sigma.ideal()
if I.is_principal():
# sigma does define an element of our Galois subgroup.
alpha = sigma.ideal().gens_reduced()[0]
t = GaloisAutomorphismQuadraticForm(self, sigma.quadratic_form(), alpha=alpha)
self.__p1_to_automorphism[t.p1_element()] = t
v.append(t)
else:
raise NotImplementedError("general Galois group not yet implemented")
v.sort()
assert len(v) == self.cardinality(), "bug enumerating Galois group elements"
return tuple(v)
def _quadratic_form_to_alpha(self, f):
"""
INPUT:
- `f` -- a binary quadratic form with discriminant `c^2 D`
OUTPUT:
- an element of the ring of integers of the quadratic
imaginary field
EXAMPLES::
sage: K3 = heegner_points(389,-52,3).ring_class_field(); K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K3.galois_group(K1)
sage: [G._quadratic_form_to_alpha(s.quadratic_form()) for s in G]
[3/2*sqrt_minus_52, 1/6*sqrt_minus_52 + 1/3, 1/6*sqrt_minus_52, 1/6*sqrt_minus_52 - 1/3]
What happens when we input a quadratic form that has nothing
to do with `G`::
sage: G._quadratic_form_to_alpha(BinaryQF([1,2,3]))
Traceback (most recent call last):
...
ValueError: quadratic form has the wrong discriminant
"""
A,B,C = f
K = self.field().quadratic_field()
if f.discriminant() != self.field().conductor()**2 * K.discriminant():
raise ValueError("quadratic form has the wrong discriminant")
R = K['X']
v = R([C,B,A]).roots()[0][0]
return v
def _alpha_to_automorphism(self, alpha):
r"""
Assuming ``self`` has base field the Hilbert class field, make an
automorphism from the element `\alpha` of the ring of integers
into ``self``.
INPUT:
- `\alpha` -- element of quadratic imaginary field coprime to conductor
EXAMPLES::
sage: K3 = heegner_points(389,-52,3).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K3.galois_group(K1)
sage: G._alpha_to_automorphism(1)
Class field automorphism defined by x^2 + 117*y^2
sage: [G._alpha_to_automorphism(s.alpha()) for s in G] == list(G)
True
"""
if not (self._base_is_hilbert_class_field() and self.is_kolyvagin()):
raise TypeError("base must be Hilbert class field with Kolyvagin condition on conductor")
R = self.field().quadratic_field().maximal_order()
uv = self._alpha_to_p1_element(R(alpha))
try:
d = self.__p1_to_automorphism
except AttributeError:
self._list() # computes attribute as side-effect
d = self.__p1_to_automorphism
return d[uv]
def _alpha_to_p1_element(self, alpha):
r"""
Given an element of the ring of integers that is nonzero
modulo `c`, return the canonical (after our fixed choice of basis)
element of the projective line corresponding to it.
INPUT:
- `\alpha` -- element of the ring of integers of the
quadratic imaginary field
OUTPUT:
- 2-tuple of integers
EXAMPLES::
sage: K3 = heegner_points(389,-52,3).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K3.galois_group(K1)
sage: G._alpha_to_p1_element(1)
(1, 0)
sage: sorted([G._alpha_to_p1_element(s.alpha()) for s in G])
[(0, 1), (1, 0), (1, 1), (1, 2)]
"""
try:
A, P1 = self.__alpha_to_p1_element
except AttributeError:
# todo (optimize) -- this whole function can be massively optimized:
M = self.field()
A = M.quadratic_field().maximal_order().free_module()
P1 = P1List(M.conductor())
self.__alpha_to_p1_element = A, P1
alpha = self.field().quadratic_field()(alpha)
w = A.coordinate_vector(alpha.vector())
w *= w.denominator()
w = w.change_ring(ZZ)
n = gcd(w)
w /= n
c = P1.N()
w = P1.normalize(ZZ(w[0])%c, ZZ(w[1])%c)
if w == (0,0):
w = (1,0)
return w
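# Worked sketch for the doctest above (D = -52, c = 3): alpha = 1 has
# coordinate vector (1, 0) with respect to the maximal order basis; it
# is already integral and primitive, and P1List(3).normalize(1, 0)
# returns (1, 0), matching G._alpha_to_p1_element(1) == (1, 0).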
def _p1_element_to_alpha(self, uv):
"""
Convert a normalized pair ``uv=(u,v)`` of integers to the
corresponding element of the ring of integers obtained by taking
`u b_0 + v b_1`, where `b_0, b_1` is the basis for the ring of
integers.
INPUT:
- ``uv`` -- pair of integers
OUTPUT:
- element of maximal order of quadratic field
EXAMPLES::
sage: K5 = heegner_points(389,-52,5).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K5.galois_group(K1)
sage: v = [G._alpha_to_p1_element(s.alpha()) for s in G]
sage: [G._p1_element_to_alpha(z) for z in v]
[1, 1/2*sqrt_minus_52, 1/2*sqrt_minus_52 + 1, 2*sqrt_minus_52 + 1, sqrt_minus_52 + 1, 3/2*sqrt_minus_52 + 1]
sage: [G(G._p1_element_to_alpha(z)) for z in v] == list(G)
True
"""
B = self.field().quadratic_field().maximal_order().basis()
return uv[0]*B[0] + uv[1]*B[1]
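# Note this inverts _alpha_to_p1_element only up to scaling by
# (ZZ/cZZ)^*, which is exactly the ambiguity allowed by the group
# description above; the doctest round-trips every automorphism's
# alpha through P^1 and back into the group.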
def _base_is_QQ(self):
r"""
Return ``True`` if the base field of this ring class field is `\QQ`.
EXAMPLES::
sage: H = heegner_points(389,-20,3); M = H.ring_class_field()
sage: M.galois_group(H.quadratic_field())._base_is_QQ()
False
sage: M.galois_group(QQ)._base_is_QQ()
True
sage: M.galois_group(heegner_points(389,-20,1).ring_class_field())._base_is_QQ()
False
"""
return self.__base == QQ
def _base_is_quad_imag_field(self):
"""
Return ``True`` if the base field of this ring class field is the
quadratic imaginary field `K`.
EXAMPLES::
sage: H = heegner_points(389,-20,3); M = H.ring_class_field()
sage: M.galois_group(H.quadratic_field())._base_is_quad_imag_field()
True
sage: M.galois_group(QQ)._base_is_quad_imag_field()
False
sage: M.galois_group(heegner_points(389,-20,1).ring_class_field())._base_is_quad_imag_field()
False
"""
return number_field.is_QuadraticField(self.__base)
def is_kolyvagin(self):
"""
Return ``True`` if conductor `c` is prime to the discriminant of the
quadratic field, `c` is squarefree and each prime dividing `c`
is inert.
EXAMPLES::
sage: K5 = heegner_points(389,-52,5).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: K5.galois_group(K1).is_kolyvagin()
True
sage: K7 = heegner_points(389,-52,7).ring_class_field()
sage: K7.galois_group(K1).is_kolyvagin()
False
sage: K25 = heegner_points(389,-52,25).ring_class_field()
sage: K25.galois_group(K1).is_kolyvagin()
False
"""
M = self.field()
c = M.conductor()
D = M.quadratic_field().discriminant()
if c.gcd(D) != 1:
return False
if not c.is_squarefree():
return False
for p in c.prime_divisors():
if not is_inert(D,p):
return False
return True
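# Worked check for the doctests above with D = -52: c = 5 passes all
# three conditions (gcd(5, -52) = 1, 5 is squarefree, and 5 is inert
# since kronecker(-52, 5) = -1), while c = 7 fails because 7 splits
# (kronecker(-52, 7) = +1) and c = 25 fails squarefreeness.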
def _base_is_hilbert_class_field(self):
"""
Return ``True`` if the base field of this ring class field is the
Hilbert class field of `K` viewed as a ring class field (so
not of data type QuadraticField).
EXAMPLES::
sage: H = heegner_points(389,-20,3); M = H.ring_class_field()
sage: M.galois_group(H.quadratic_field())._base_is_hilbert_class_field()
False
sage: M.galois_group(QQ)._base_is_hilbert_class_field()
False
sage: M.galois_group(heegner_points(389,-20,1).ring_class_field())._base_is_hilbert_class_field()
True
"""
M = self.__base
return isinstance(M, RingClassField) and M.conductor() == 1
def __getitem__(self, i):
"""
EXAMPLES::
sage: E = EllipticCurve('389a'); F= E.heegner_point(-7,5).ring_class_field()
sage: G = F.galois_group(F.quadratic_field())
sage: G[0]
Class field automorphism defined by x^2 + x*y + 44*y^2
"""
return self._list()[i]
def __len__(self):
"""
EXAMPLES::
sage: K5 = heegner_points(389,-52,5).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K5.galois_group(K1)
sage: G.cardinality()
6
sage: len(G)
6
"""
return self.cardinality()
@cached_method
def cardinality(self):
"""
Return the cardinality of this Galois group.
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: G = E.heegner_point(-7,5).ring_class_field().galois_group(); G
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: G.cardinality()
12
sage: G = E.heegner_point(-7).ring_class_field().galois_group()
sage: G.cardinality()
2
sage: G = E.heegner_point(-7,55).ring_class_field().galois_group()
sage: G.cardinality()
120
"""
return self.__field.absolute_degree() // self.__base.absolute_degree()
@cached_method
def complex_conjugation(self):
"""
Return the automorphism of ``self`` determined by complex
conjugation. The base field must be the rational numbers.
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: G = E.heegner_point(-7,5).ring_class_field().galois_group()
sage: G.complex_conjugation()
Complex conjugation automorphism of Ring class field extension of QQ[sqrt(-7)] of conductor 5
"""
if self.base_field() != QQ:
raise ValueError("the base field must be fixed by complex conjugation")
return GaloisAutomorphismComplexConjugation(self)
##################################################################################
#
# Elements of Galois groups
#
##################################################################################
class GaloisAutomorphism(SageObject):
"""
An abstract automorphism of a ring class field.
.. TODO::
make :class:`GaloisAutomorphism` derive from GroupElement, so
that one gets powers for free, etc.
"""
def __init__(self, parent):
"""
INPUT:
- ``parent`` -- a group of automorphisms of a ring class field
EXAMPLES::
sage: G = heegner_points(389,-7,5).ring_class_field().galois_group(); G
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: sage.schemes.elliptic_curves.heegner.GaloisAutomorphism(G)
<sage.schemes.elliptic_curves.heegner.GaloisAutomorphism object at ...>
"""
self.__parent = parent
def parent(self):
"""
Return the parent of this automorphism, which is a Galois
group of a ring class field.
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: s = E.heegner_point(-7,5).ring_class_field().galois_group().complex_conjugation()
sage: s.parent()
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 5
"""
return self.__parent
def domain(self):
"""
Return the domain of this automorphism.
EXAMPLES::
sage: E = EllipticCurve('389a')
sage: s = E.heegner_point(-7,5).ring_class_field().galois_group().complex_conjugation()
sage: s.domain()
Ring class field extension of QQ[sqrt(-7)] of conductor 5
"""
return self.parent().field()
class GaloisAutomorphismComplexConjugation(GaloisAutomorphism):
"""
The complex conjugation automorphism of a ring class field.
EXAMPLES::
sage: conj = heegner_point(37,-7,5).ring_class_field().galois_group().complex_conjugation()
sage: conj
Complex conjugation automorphism of Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: conj.domain()
Ring class field extension of QQ[sqrt(-7)] of conductor 5
TESTS::
sage: type(conj)
<class 'sage.schemes.elliptic_curves.heegner.GaloisAutomorphismComplexConjugation'>
sage: loads(dumps(conj)) == conj
True
"""
def __init__(self, parent):
"""
INPUT:
- ``parent`` -- a group of automorphisms of a ring class field
EXAMPLES::
sage: G = heegner_point(37,-7,5).ring_class_field().galois_group()
sage: sage.schemes.elliptic_curves.heegner.GaloisAutomorphismComplexConjugation(G)
Complex conjugation automorphism of Ring class field extension of QQ[sqrt(-7)] of conductor 5
"""
GaloisAutomorphism.__init__(self, parent)
def __hash__(self):
"""
The hash value is the same as the hash value of the
pair ``(self.parent(), 1)``.
EXAMPLES::
sage: G = EllipticCurve('389a').heegner_point(-7,5).ring_class_field().galois_group()
sage: conj = G.complex_conjugation()
sage: hash(conj) == hash((conj.parent(), 1))
True
"""
return hash((self.parent(), 1))
def __eq__(self, right):
"""
EXAMPLES::
sage: G = EllipticCurve('389a').heegner_point(-7,5).ring_class_field().galois_group()
sage: conj = G.complex_conjugation()
sage: conj2 = sage.schemes.elliptic_curves.heegner.GaloisAutomorphismComplexConjugation(G)
sage: conj is conj2
False
sage: conj == conj2
True
"""
return isinstance(right, GaloisAutomorphismComplexConjugation) and \
self.parent() == right.parent()
def __ne__(self, other):
"""
EXAMPLES::
sage: G = EllipticCurve('389a').heegner_point(-7,5).ring_class_field().galois_group()
sage: conj = G.complex_conjugation()
sage: conj2 = sage.schemes.elliptic_curves.heegner.GaloisAutomorphismComplexConjugation(G)
sage: conj != conj2
False
"""
return not (self == other)
def _repr_(self):
"""
Return print representation of the complex conjugation automorphism.
EXAMPLES::
sage: conj = heegner_point(37,-7,5).ring_class_field().galois_group().complex_conjugation()
sage: conj._repr_()
'Complex conjugation automorphism of Ring class field extension of QQ[sqrt(-7)] of conductor 5'
"""
return "Complex conjugation automorphism of %s"%self.domain()
## def __mul__(self, right):
## """
## Return the composition of two automorphisms.
## EXAMPLES::
## sage: ?
## """
## if self.parent() != right.__parent():
## raise TypeError, "automorphisms must be of the same class field"
## raise NotImplementedError
def __invert__(self):
"""
Return the inverse of ``self``, which is just ``self`` again.
EXAMPLES::
sage: conj = heegner_point(37,-7,5).ring_class_field().galois_group().complex_conjugation()
sage: ~conj
Complex conjugation automorphism of Ring class field extension of QQ[sqrt(-7)] of conductor 5
"""
return self
def order(self):
"""
EXAMPLES::
sage: conj = heegner_point(37,-7,5).ring_class_field().galois_group().complex_conjugation()
sage: conj.order()
2
"""
return ZZ(2)
@richcmp_method
class GaloisAutomorphismQuadraticForm(GaloisAutomorphism):
"""
An automorphism of a ring class field defined by a quadratic form.
EXAMPLES::
sage: H = heegner_points(389,-20,3)
sage: sigma = H.ring_class_field().galois_group(H.quadratic_field())[0]; sigma
Class field automorphism defined by x^2 + 45*y^2
sage: type(sigma)
<class 'sage.schemes.elliptic_curves.heegner.GaloisAutomorphismQuadraticForm'>
sage: loads(dumps(sigma)) == sigma
True
"""
def __init__(self, parent, quadratic_form, alpha=None):
r"""
INPUT:
- ``parent`` -- a group of automorphisms of a ring class field
- ``quadratic_form`` -- a binary quadratic form that
defines an element of the Galois group of `K_c` over `K`.
- ``\alpha`` -- (default: ``None``) optional data specifying the
corresponding element of `(\mathcal{O}_K /
c\mathcal{O}_K)^* / (\ZZ/c\ZZ)^*`, via class field
theory.
EXAMPLES::
sage: H = heegner_points(389,-20,3); G = H.ring_class_field().galois_group(H.quadratic_field())
sage: f = BinaryQF_reduced_representatives(-20*9)[0]
sage: sage.schemes.elliptic_curves.heegner.GaloisAutomorphismQuadraticForm(G, f)
Class field automorphism defined by x^2 + 45*y^2
"""
self.__quadratic_form = quadratic_form.reduced_form()
self.__alpha = alpha
GaloisAutomorphism.__init__(self, parent)
@cached_method
def order(self):
"""
Return the multiplicative order of this Galois group automorphism.
EXAMPLES::
sage: K3 = heegner_points(389,-52,3).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K3.galois_group(K1)
sage: sorted([g.order() for g in G])
[1, 2, 4, 4]
sage: K5 = heegner_points(389,-52,5).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K5.galois_group(K1)
sage: sorted([g.order() for g in G])
[1, 2, 3, 3, 6, 6]
"""
alpha = self.__alpha
if alpha is None:
raise NotImplementedError("order only currently implemented when alpha given in construction")
G = self.parent()
one = G(1).p1_element()
ans = ZZ(1)
z = alpha
for i in range(G.cardinality()):
if G._alpha_to_p1_element(z) == one:
return ans
ans += 1
z *= alpha
assert False, "bug in order"
def alpha(self):
r"""
Optional data specifying the corresponding element of
`(\mathcal{O}_K / c\mathcal{O}_K)^* / (\ZZ/c\ZZ)^*`, via class
field theory.
This is a generator of the ideal corresponding to this
automorphism.
EXAMPLES::
sage: K3 = heegner_points(389,-52,3).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K3.galois_group(K1)
sage: orb = sorted([g.alpha() for g in G]); orb # random (the sign depends on the database being installed or not)
[1, 1/2*sqrt_minus_52 + 1, -1/2*sqrt_minus_52, 1/2*sqrt_minus_52 - 1]
sage: sorted([x^2 for x in orb]) # this is just for testing
[-13, -sqrt_minus_52 - 12, sqrt_minus_52 - 12, 1]
sage: K5 = heegner_points(389,-52,5).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K5.galois_group(K1)
sage: orb = sorted([g.alpha() for g in G]); orb # random (the sign depends on the database being installed or not)
[1, -1/2*sqrt_minus_52, 1/2*sqrt_minus_52 + 1, 1/2*sqrt_minus_52 - 1, 1/2*sqrt_minus_52 - 2, -1/2*sqrt_minus_52 - 2]
sage: sorted([x^2 for x in orb]) # just for testing
[-13, -sqrt_minus_52 - 12, sqrt_minus_52 - 12, -2*sqrt_minus_52 - 9, 2*sqrt_minus_52 - 9, 1]
"""
if self.__alpha is None:
raise ValueError("alpha data not defined")
return self.__alpha
@cached_method
def p1_element(self):
r"""
Return element of the projective line corresponding to this
automorphism.
This only makes sense if this automorphism is in the Galois
group `\textrm{Gal}(K_c/K_1)`.
EXAMPLES::
sage: K3 = heegner_points(389,-52,3).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K3.galois_group(K1)
sage: sorted([g.p1_element() for g in G])
[(0, 1), (1, 0), (1, 1), (1, 2)]
sage: K5 = heegner_points(389,-52,5).ring_class_field()
sage: K1 = heegner_points(389,-52,1).ring_class_field()
sage: G = K5.galois_group(K1)
sage: sorted([g.p1_element() for g in G])
[(0, 1), (1, 0), (1, 1), (1, 2), (1, 3), (1, 4)]
"""
return self.parent()._alpha_to_p1_element(self.__alpha)
def __hash__(self):
"""
The hash value is the hash of the pair formed by the parent
and the quadratic form read as a tuple.
EXAMPLES::
sage: H = heegner_points(389,-20,3)
sage: s = H.ring_class_field().galois_group(H.quadratic_field())[0]
sage: hash(s) == hash((s.parent(), tuple(s.quadratic_form())))
True
"""
return hash((self.parent(), tuple(self.__quadratic_form)))
def __richcmp__(self, right, op):
"""
Comparison.
EXAMPLES::
sage: H = heegner_points(389,-7,5)
sage: s = H.ring_class_field().galois_group(H.quadratic_field())[1]
sage: s == s
True
sage: s == s*s
False
sage: s == s*s*s*s*s
False
sage: s == s*s*s*s*s*s*s
True
sage: H = heegner_points(389,-20,3)
sage: s = H.ring_class_field().galois_group(H.quadratic_field())[0]
sage: s == s
True
sage: s == 0
False
"""
if not isinstance(right, GaloisAutomorphismQuadraticForm):
return NotImplemented
lx = self.parent()
rx = right.parent()
if lx != rx:
return richcmp_not_equal(lx, rx, op)
if self.quadratic_form().is_equivalent(right.quadratic_form()):
return rich_to_bool(op, 0)
return richcmp(self.quadratic_form(), right.quadratic_form(), op)
def _repr_(self):
"""
Return string representation of this automorphism.
EXAMPLES::
sage: H = heegner_points(389,-20,3); s = H.ring_class_field().galois_group(H.quadratic_field())[0]
sage: s._repr_()
'Class field automorphism defined by x^2 + 45*y^2'
"""
return "Class field automorphism defined by %s"%self.__quadratic_form
def __mul__(self, right):
"""
Return the composition of two automorphisms.
EXAMPLES::
sage: H = heegner_points(389,-20,3); s = H.ring_class_field().galois_group(H.quadratic_field())[0]
sage: s * s
Class field automorphism defined by x^2 + 45*y^2
sage: G = s.parent(); list(G)
[Class field automorphism defined by x^2 + 45*y^2, Class field automorphism defined by 2*x^2 + 2*x*y + 23*y^2, Class field automorphism defined by 5*x^2 + 9*y^2, Class field automorphism defined by 7*x^2 + 4*x*y + 7*y^2]
sage: G[0]*G[0]
Class field automorphism defined by x^2 + 45*y^2
sage: G[1]*G[2] == G[3]
True
"""
if self.parent() != right.parent():
raise TypeError("automorphisms must be of the same class field")
if not isinstance(right, GaloisAutomorphismQuadraticForm):
# TODO: special case when right is complex conjugation
raise NotImplementedError
Q = (self.__quadratic_form * right.__quadratic_form).reduced_form()
if self.__alpha and right.__alpha:
alpha = self.__alpha * right.__alpha
else:
alpha = None
return GaloisAutomorphismQuadraticForm(self.parent(), Q, alpha=alpha)
def quadratic_form(self):
"""
Return reduced quadratic form corresponding to this Galois
automorphism.
EXAMPLES::
sage: H = heegner_points(389,-20,3); s = H.ring_class_field().galois_group(H.quadratic_field())[0]
sage: s.quadratic_form()
x^2 + 45*y^2
"""
return self.__quadratic_form
@cached_method
def ideal(self):
r"""
Return ideal of ring of integers of quadratic imaginary field
corresponding to this quadratic form. This is the ideal
`I = \left(A, \frac{-B + c\sqrt{D}}{2}\right) \mathcal{O}_K`.
EXAMPLES::
sage: E = EllipticCurve('389a'); F= E.heegner_point(-20,3).ring_class_field()
sage: G = F.galois_group(F.quadratic_field())
sage: G[1].ideal()
Fractional ideal (2, 1/2*sqrt_minus_20 + 1)
sage: [s.ideal().gens() for s in G]
[(1, 3/2*sqrt_minus_20), (2, 3/2*sqrt_minus_20 - 1), (5, 3/2*sqrt_minus_20), (7, 3/2*sqrt_minus_20 - 2)]
"""
M = self.parent().field()
K = M.quadratic_field()
f = self.quadratic_form()
c = M.conductor()
sqrtD = K.gen()
(A,B,C) = f
if A%c == 0:
A, C = C, A
return K.maximal_order().ideal([A, (-B+c*sqrtD)/2])
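# Worked sketch for the doctest above (D = -20, c = 3): the automorphism
# G[1] has reduced form 2*x^2 + 2*x*y + 23*y^2, so (A, B) = (2, 2) and
# the ideal is (2, (-2 + 3*sqrt_minus_20)/2) = (2, 3/2*sqrt_minus_20 - 1).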
## def __call__(self, z):
## """
## Return image of the Heegner point `z` under this automorphism.
##
## INPUT:
##
## - `z` -- a Heegner point on `X_0(N)` or an elliptic curve
##
## OUTPUT:
##
## - a Heegner point
##
## EXAMPLES::
##
## sage: x = heegner_point(389,-20,3); F = x.ring_class_field()
## sage: sigma = F.galois_group(F.quadratic_field())[1]; sigma
## Class field automorphism defined by 2*x^2 + 2*x*y + 23*y^2
## sage: sigma(x)
## Heegner point 3/1556*sqrt(-20) - 495/778 of discriminant -20 and conductor 3 on X_0(389)
## """
## if isinstance(z, HeegnerPointOnX0N):
## if z.ring_class_field() != self.domain():
## raise NotImplementedError, "class fields must be the same"
## # TODO -- check more compatibilities?
## # TODO -- this is surely backwards -- something must be inverted?
## f = z.quadratic_form() * self.quadratic_form()
## # TODO -- put f into the correct form with A divisible by N, etc.?
## # That could be done by looking up reduced form of f in a canonical
## # list of best reps.
## N,D,c = z.level(),z.discriminant(),z.conductor()
## return HeegnerPointOnX0N(N,D,c, f = f)
## else:
## raise NotImplementedError
##################################################################################
#
# Specific Heegner points
#
##################################################################################
@richcmp_method
class HeegnerPoint(SageObject):
r"""
A Heegner point of level `N`, discriminant `D` and conductor `c`
is any point on a modular curve or elliptic curve that is
concocted in some way from a quadratic imaginary `\tau` in the upper
half plane with `\Delta(\tau) = D c^2 = \Delta(N \tau)`.
EXAMPLES::
sage: x = sage.schemes.elliptic_curves.heegner.HeegnerPoint(389,-7,13); x
Heegner point of level 389, discriminant -7, and conductor 13
sage: type(x)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPoint'>
sage: loads(dumps(x)) == x
True
"""
def __init__(self, N, D, c):
"""
INPUT:
- `N` -- (positive integer) the level
- `D` -- (negative integer) fundamental discriminant
- `c` -- (positive integer) conductor
Since this is an abstract base class, no type or compatibility
checks are done, as those are all assumed to be done in the
derived class.
EXAMPLES::
sage: H = sage.schemes.elliptic_curves.heegner.HeegnerPoint(389,-7,5)
sage: type(H)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPoint'>
"""
self.__N = N
self.__D = D
self.__c = c
def __richcmp__(self, x, op):
"""
Compare two Heegner points.
EXAMPLES::
sage: H = sage.schemes.elliptic_curves.heegner.HeegnerPoint(389,-7,5)
sage: H == H
True
sage: H = sage.schemes.elliptic_curves.heegner.HeegnerPoint(389,-7,5); type(H)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPoint'>
sage: J = sage.schemes.elliptic_curves.heegner.HeegnerPoint(389,-7,11)
sage: H == H
True
sage: H == J
False
sage: J == H
False
sage: H == 0
False
"""
if not isinstance(x, HeegnerPoint):
return NotImplemented
return richcmp((self.__N, self.__D, self.__c),
(x.__N, x.__D, x.__c), op)
def _repr_(self):
"""
EXAMPLES::
sage: H = sage.schemes.elliptic_curves.heegner.HeegnerPoint(389,-7,5)
sage: type(H)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPoint'>
sage: H._repr_()
'Heegner point of level 389, discriminant -7, and conductor 5'
"""
return "Heegner point of level %s, discriminant %s, and conductor %s"%(
self.__N, self.__D, self.__c)
def __hash__(self):
"""
The hash value is obtained from level, discriminant, and conductor.
EXAMPLES::
sage: H = sage.schemes.elliptic_curves.heegner.HeegnerPoint(389,-7,5); type(H)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPoint'>
sage: hash(H) == hash((H.level(), H.discriminant(), H.conductor()))
True
"""
return hash((self.__N, self.__D, self.__c))
def level(self):
"""
Return the level of this Heegner point, which is the level of the
modular curve `X_0(N)` on which this is a Heegner point.
EXAMPLES::
sage: heegner_point(389,-7,5).level()
389
"""
return self.__N
def conductor(self):
"""
Return the conductor of this Heegner point.
EXAMPLES::
sage: heegner_point(389,-7,5).conductor()
5
sage: E = EllipticCurve('37a1'); P = E.kolyvagin_point(-67,7); P
Kolyvagin point of discriminant -67 and conductor 7 on elliptic curve of conductor 37
sage: P.conductor()
7
sage: E = EllipticCurve('389a'); P = E.heegner_point(-7, 5); P.conductor()
5
"""
return self.__c
def discriminant(self):
"""
Return the discriminant of the quadratic imaginary field
associated to this Heegner point.
EXAMPLES::
sage: heegner_point(389,-7,5).discriminant()
-7
sage: E = EllipticCurve('37a1'); P = E.kolyvagin_point(-67,7); P
Kolyvagin point of discriminant -67 and conductor 7 on elliptic curve of conductor 37
sage: P.discriminant()
-67
sage: E = EllipticCurve('389a'); P = E.heegner_point(-7, 5); P.discriminant()
-7
"""
return self.__D
@cached_method
def quadratic_field(self):
"""
Return the quadratic number field of discriminant `D`.
EXAMPLES::
sage: x = heegner_point(37,-7,5)
sage: x.quadratic_field()
Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
sage: E = EllipticCurve('37a'); P = E.heegner_point(-40)
sage: P.quadratic_field()
Number Field in sqrt_minus_40 with defining polynomial x^2 + 40 with sqrt_minus_40 = 6.324555320336759?*I
sage: P.quadratic_field() is P.quadratic_field()
True
sage: type(P.quadratic_field())
<class 'sage.rings.number_field.number_field.NumberField_quadratic_with_category'>
"""
return self.ring_class_field().quadratic_field()
@cached_method
def quadratic_order(self):
"""
Return the order of conductor `c` in the quadratic imaginary
field, where `c` is the conductor of this Heegner point.
EXAMPLES::
sage: heegner_point(389,-7,5).quadratic_order()
Order in Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
sage: heegner_point(389,-7,5).quadratic_order().basis()
[1, 5*sqrt_minus_7]
sage: E = EllipticCurve('37a'); P = E.heegner_point(-40,11)
sage: P.quadratic_order()
Order in Number Field in sqrt_minus_40 with defining polynomial x^2 + 40 with sqrt_minus_40 = 6.324555320336759?*I
sage: P.quadratic_order().basis()
[1, 11*sqrt_minus_40]
"""
K = self.quadratic_field()
return K.order([1,self.conductor()*K.gen()])
@cached_method
def ring_class_field(self):
"""
Return the ring class field associated to this Heegner point.
This is an extension `K_c` over `K`, where `K` is the
quadratic imaginary field and `c` is the conductor associated
to this Heegner point. This Heegner point is defined over
`K_c` and the Galois group `Gal(K_c/K)` acts transitively on
the Galois conjugates of this Heegner point.
EXAMPLES::
sage: E = EllipticCurve('389a'); K.<a> = QuadraticField(-5)
sage: len(K.factor(5))
1
sage: len(K.factor(23))
2
sage: E.heegner_point(-7, 5).ring_class_field().degree_over_K()
6
sage: E.heegner_point(-7, 23).ring_class_field().degree_over_K()
22
sage: E.heegner_point(-7, 5*23).ring_class_field().degree_over_K()
132
sage: E.heegner_point(-7, 5^2).ring_class_field().degree_over_K()
30
sage: E.heegner_point(-7, 7).ring_class_field().degree_over_K()
7
"""
return RingClassField(self.discriminant(), self.conductor())
##################################################################################
#
# Sets of Heegner points
#
##################################################################################
class HeegnerPoints(SageObject):
"""
The set of Heegner points with given parameters.
EXAMPLES::
sage: H = heegner_points(389); H
Set of all Heegner points on X_0(389)
sage: type(H)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPoints_level'>
sage: isinstance(H, sage.schemes.elliptic_curves.heegner.HeegnerPoints)
True
"""
def __init__(self, N):
"""
INPUT:
- `N` -- level, a positive integer
EXAMPLES::
sage: heegner_points(37)
Set of all Heegner points on X_0(37)
sage: heegner_points(0)
Traceback (most recent call last):
...
ValueError: N must be a positive integer
"""
self.__N = ZZ(N)
if self.__N <= 0:
raise ValueError("N must a positive integer")
def level(self):
"""
Return the level `N` of the modular curve `X_0(N)`.
EXAMPLES::
sage: heegner_points(389).level()
389
"""
return self.__N
class HeegnerPoints_level(HeegnerPoints):
"""
Return the infinite set of all Heegner points on `X_0(N)` for all
quadratic imaginary fields.
EXAMPLES::
sage: H = heegner_points(11); H
Set of all Heegner points on X_0(11)
sage: type(H)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPoints_level'>
sage: loads(dumps(H)) == H
True
"""
def __eq__(self, other):
"""
EXAMPLES::
sage: H = heegner_points(11)
sage: H == heegner_points(13)
False
sage: H == heegner_points(11)
True
sage: H == 0
False
"""
return isinstance(other, HeegnerPoints_level) and self.level() == other.level()
def __ne__(self, other):
"""
EXAMPLES::
sage: H = heegner_points(11)
sage: H != heegner_points(13)
True
sage: H != heegner_points(11)
False
sage: H != 0
True
"""
return not (self == other)
def _repr_(self):
"""
Return string representation of the set of Heegner points.
EXAMPLES::
sage: heegner_points(389)._repr_()
'Set of all Heegner points on X_0(389)'
"""
return "Set of all Heegner points on X_0(%s)"%self.level()
def reduce_mod(self, ell):
r"""
Return object that allows for computation with Heegner points
of level `N` modulo the prime `\ell`, represented using
quaternion algebras.
INPUT:
- `\ell` -- prime
EXAMPLES::
sage: heegner_points(389).reduce_mod(7).quaternion_algebra()
Quaternion Algebra (-1, -7) with base ring Rational Field
"""
return HeegnerQuatAlg(self.level(), ell)
def discriminants(self, n=10, weak=False):
r"""
Return the first `n` quadratic imaginary discriminants that
satisfy the Heegner hypothesis for `N`.
INPUT:
- `n` -- nonnegative integer
- ``weak`` -- bool (default: ``False``); if ``True`` only require
weak Heegner hypothesis, which is the same as usual but
without the condition that `\gcd(D,N)=1`.
EXAMPLES::
sage: X = heegner_points(37)
sage: X.discriminants(5)
[-7, -11, -40, -47, -67]
The default is 10::
sage: X.discriminants()
[-7, -11, -40, -47, -67, -71, -83, -84, -95, -104]
sage: X.discriminants(15)
[-7, -11, -40, -47, -67, -71, -83, -84, -95, -104, -107, -115, -120, -123, -127]
The discriminant -111 satisfies only the weak Heegner hypothesis, since it
is divisible by 37::
sage: X.discriminants(15,weak=True)
[-7, -11, -40, -47, -67, -71, -83, -84, -95, -104, -107, -111, -115, -120, -123]
"""
N = self.level()
n = ZZ(n)
v = []
D = ZZ(-4)
while len(v) < n:
D -= 1
if satisfies_weak_heegner_hypothesis(N,D):
# if not weak, then also require gcd(D,N)=1
if not weak and D.gcd(N) != 1:
continue
v.append(D)
return v
class HeegnerPoints_level_disc(HeegnerPoints):
"""
Set of Heegner points of given level and all conductors associated
to a quadratic imaginary field.
EXAMPLES::
sage: H = heegner_points(389,-7); H
Set of all Heegner points on X_0(389) associated to QQ[sqrt(-7)]
sage: type(H)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPoints_level_disc'>
sage: H._repr_()
'Set of all Heegner points on X_0(389) associated to QQ[sqrt(-7)]'
sage: H.discriminant()
-7
sage: H.quadratic_field()
Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
sage: H.kolyvagin_conductors()
[1, 3, 5, 13, 15, 17, 19, 31, 39, 41]
sage: loads(dumps(H)) == H
True
"""
def __init__(self, N, D):
"""
INPUT:
- `N` -- positive integer
- `D` -- negative fundamental discriminant
EXAMPLES::
sage: sage.schemes.elliptic_curves.heegner.HeegnerPoints_level_disc(37,-7)
Set of all Heegner points on X_0(37) associated to QQ[sqrt(-7)]
"""
HeegnerPoints.__init__(self, N)
D = ZZ(D)
if not satisfies_weak_heegner_hypothesis(N,D):
raise ValueError("D (=%s) must satisfy the weak Heegner hypothesis for N (=%s)"%(D,N))
self.__D = D
def __eq__(self, other):
"""
EXAMPLES::
sage: H = heegner_points(389,-7)
sage: H == heegner_points(389,-7)
True
sage: H == 0
False
sage: H == heegner_points(389,-11)
False
"""
return isinstance(other, HeegnerPoints_level_disc) and \
self.level() == other.level() and self.__D == other.__D
def __ne__(self, other):
"""
EXAMPLES::
sage: H = heegner_points(389,-7)
sage: H != heegner_points(389,-7)
False
sage: H != 0
True
sage: H != heegner_points(389,-11)
True
"""
return not (self == other)
def _repr_(self):
"""
Return string representation of the set of Heegner points for a given
quadratic field.
EXAMPLES::
sage: heegner_points(389,-7)._repr_()
'Set of all Heegner points on X_0(389) associated to QQ[sqrt(-7)]'
"""
return "Set of all Heegner points on X_0(%s) associated to QQ[sqrt(%s)]"%(
self.level(), self.discriminant())
def discriminant(self):
r"""
Return the discriminant of the quadratic imaginary extension `K`.
EXAMPLES::
sage: heegner_points(389,-7).discriminant()
-7
"""
return self.__D
@cached_method
def quadratic_field(self):
r"""
Return the quadratic imaginary field `K = \QQ(\sqrt{D})`.
EXAMPLES::
sage: E = EllipticCurve('389a'); K = E.heegner_point(-7,5).ring_class_field()
sage: K.quadratic_field()
Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
"""
D = self.__D
var = 'sqrt_minus_%s'%(-D)
return number_field.QuadraticField(D,var)
def kolyvagin_conductors(self, r=None, n=10, E=None, m=None):
r"""
Return the first `n` conductors that are squarefree products
of distinct primes inert in the quadratic imaginary field
`K = \QQ(\sqrt{D})`. If `r` is specified, return only
conductors that are a product of `r` distinct primes all inert
in `K`. If `r = 0`, always return the list ``[1]``,
no matter what.
If the optional elliptic curve `E` and integer `m` are given,
then only include conductors `c` such that for each prime
divisor `p` of `c` we have `m \mid \gcd(a_p(E), p+1)`.
INPUT:
- `r` -- (default: ``None``) nonnegative integer or ``None``
- `n` -- positive integer
- `E` -- an elliptic curve
- `m` -- a positive integer
EXAMPLES::
sage: H = heegner_points(389,-7)
sage: H.kolyvagin_conductors(0)
[1]
sage: H.kolyvagin_conductors(1)
[3, 5, 13, 17, 19, 31, 41, 47, 59, 61]
sage: H.kolyvagin_conductors(1,15)
[3, 5, 13, 17, 19, 31, 41, 47, 59, 61, 73, 83, 89, 97, 101]
sage: H.kolyvagin_conductors(1,5)
[3, 5, 13, 17, 19]
sage: H.kolyvagin_conductors(1,5,EllipticCurve('389a'),3)
[5, 17, 41, 59, 83]
sage: H.kolyvagin_conductors(2,5,EllipticCurve('389a'),3)
[85, 205, 295, 415, 697]
"""
D = self.__D
if not satisfies_weak_heegner_hypothesis(self.level(),D):
raise ValueError("D must satisfy the weak Heegner hypothesis")
n = ZZ(n)
if n <= 0:
raise ValueError("n must be a positive integer")
if r is not None:
r = ZZ(r)
if r < 0:
raise ValueError("n must be a nonnegative integer")
if r == 0:
return [ZZ(1)]
c = ZZ(1)
v = []
N = self.level()
if E is not None:
m = ZZ(m)
while len(v) < n:
if is_kolyvagin_conductor(N, E, D, r, m, c):
v.append(c)
c += 1
return v
def is_kolyvagin_conductor(N, E, D, r, n, c):
r"""
Return ``True`` if `c` is a Kolyvagin conductor for level `N`,
discriminant `D`, mod `n`, etc., i.e., `c` is squarefree with
exactly `r` prime factors (when `r` is not ``None``), is coprime
to `ND`, each prime dividing `c` is inert in `K`, and if `E` is
not ``None`` then `n \mid \gcd(p+1, a_p(E))` for each prime `p`
dividing `c`.
INPUT:
- `N` -- level (positive integer)
- `E` -- elliptic curve or ``None``
- `D` -- negative fundamental discriminant
- `r` -- number of prime factors (nonnegative integer) or ``None``
- `n` -- torsion order (i.e., do we get class in `(E(K_c)/n E(K_c))^{Gal(K_c/K)}`?)
- `c` -- conductor (positive integer)
EXAMPLES::
sage: from sage.schemes.elliptic_curves.heegner import is_kolyvagin_conductor
sage: is_kolyvagin_conductor(389,None,-7,1,None,5)
True
sage: is_kolyvagin_conductor(389,None,-7,1,None,7)
False
sage: is_kolyvagin_conductor(389,None,-7,1,None,11)
False
sage: is_kolyvagin_conductor(389,EllipticCurve('389a'),-7,1,3,5)
True
sage: is_kolyvagin_conductor(389,EllipticCurve('389a'),-7,1,11,5)
False
"""
ND = N*D
if ND.gcd(c) != 1:
return False
if not c.is_squarefree():
return False
P = c.prime_factors()
if r is not None and len(P) != r:
return False
# check that each prime in P is inert in K
for p in P:
if D.kronecker(p) != -1:
return False
if E is not None and n is not None:
for p in P:
if (p+1).gcd(E.ap(p)) % n != 0:
return False
return True
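# A minimal illustrative sketch (not part of this module's API; the helper
# name is hypothetical): the inertness test that is_kolyvagin_conductor
# applies to each prime divisor p of c.  A prime p is inert in
# K = Q(sqrt(D)) exactly when the Kronecker symbol (D/p) is -1.
def _inert_primes_sketch(D, bound):
    """
    Return the primes p < bound that are inert in Q(sqrt(D)).

    This only illustrates the test used by is_kolyvagin_conductor above.
    """
    from sage.arith.all import primes
    D = ZZ(D)
    return [p for p in primes(bound) if D.kronecker(p) == -1]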
class HeegnerPoints_level_disc_cond(HeegnerPoints_level, HeegnerPoints_level_disc):
"""
The set of Heegner points of given level, discriminant, and conductor.
EXAMPLES::
sage: H = heegner_points(389,-7,5); H
All Heegner points of conductor 5 on X_0(389) associated to QQ[sqrt(-7)]
sage: type(H)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPoints_level_disc_cond'>
sage: H.discriminant()
-7
sage: H.level()
389
sage: len(H.points())
12
sage: H.points()[0]
Heegner point 5/778*sqrt(-7) - 147/778 of discriminant -7 and conductor 5 on X_0(389)
sage: H.betas()
(147, 631)
sage: H.quadratic_field()
Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
sage: H.ring_class_field()
Ring class field extension of QQ[sqrt(-7)] of conductor 5
sage: H.kolyvagin_conductors()
[1, 3, 5, 13, 15, 17, 19, 31, 39, 41]
sage: H.satisfies_kolyvagin_hypothesis()
True
sage: H = heegner_points(389,-7,5)
sage: loads(dumps(H)) == H
True
"""
def __init__(self, N, D, c=ZZ(1)):
"""
Create set of Heegner points.
INPUT:
- `N` -- positive integer (the level)
- `D` -- negative fundamental discriminant
- `c` -- conductor (default: 1)
EXAMPLES::
sage: H = heegner_points(389,-7,5); H
All Heegner points of conductor 5 on X_0(389) associated to QQ[sqrt(-7)]
sage: type(H)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPoints_level_disc_cond'>
"""
HeegnerPoints_level.__init__(self, N)
HeegnerPoints_level_disc.__init__(self, N, D)
self.__c = ZZ(c)
def __eq__(self, other):
"""
EXAMPLES::
sage: H = heegner_points(389,-7, 3)
sage: H == heegner_points(389,-7, 3)
True
sage: H == heegner_points(389,-7, 1)
False
sage: H == 0
False
"""
return isinstance(other, HeegnerPoints_level_disc_cond) and \
self.level() == other.level() and self.discriminant() == other.discriminant() \
and self.conductor() == other.conductor()
def __ne__(self, other):
"""
EXAMPLES::
sage: H = heegner_points(389,-7, 3)
sage: H != heegner_points(389,-7, 3)
False
sage: H != heegner_points(389,-7, 1)
True
sage: H != 0
True
"""
return not (self == other)
def _repr_(self):
"""
Return string representation of this set of Heegner points.
EXAMPLES::
sage: H = heegner_points(37,-7,5); H._repr_()
'All Heegner points of conductor 5 on X_0(37) associated to QQ[sqrt(-7)]'
"""
return "All Heegner points of conductor %s on X_0(%s) associated to QQ[sqrt(%s)]"%(
self.conductor(), self.level(), self.discriminant())
def conductor(self):
"""
Return the conductor `c`.
EXAMPLES::
sage: heegner_points(389,-7,5).conductor()
5
"""
return self.__c
@cached_method
def satisfies_kolyvagin_hypothesis(self):
"""
Return ``True`` if ``self`` satisfies the Kolyvagin hypothesis, i.e.,
that each prime dividing the conductor `c` of ``self`` is inert in
`K` and coprime to `ND`.
EXAMPLES:
The prime 5 is inert, but the prime 11 is not::
sage: heegner_points(389,-7,5).satisfies_kolyvagin_hypothesis()
True
sage: heegner_points(389,-7,11).satisfies_kolyvagin_hypothesis()
False
"""
return is_kolyvagin_conductor(N=self.level(), E=None, D=self.discriminant(),
r=None, n=None, c=self.conductor())
@cached_method
def ring_class_field(self):
"""
Return the ring class field associated to this set of Heegner
points. This is an extension `K_c` over `K`, where `K` is the
quadratic imaginary field and `c` the conductor associated to
this Heegner point. This Heegner point is defined over `K_c`
and the Galois group `Gal(K_c/K)` acts transitively on the
Galois conjugates of this Heegner point.
EXAMPLES::
sage: heegner_points(389,-7,5).ring_class_field()
Ring class field extension of QQ[sqrt(-7)] of conductor 5
"""
return RingClassField(self.discriminant(), self.conductor())
def __getitem__(self, i):
"""
Return the `i`-th Heegner point.
EXAMPLES::
sage: H = heegner_points(389,-7,5)
sage: len(H)
12
sage: H[0]
Heegner point 5/778*sqrt(-7) - 147/778 of discriminant -7 and conductor 5 on X_0(389)
sage: H[-1]
Heegner point 5/5446*sqrt(-7) - 757/778 of discriminant -7 and conductor 5 on X_0(389)
"""
return self.points()[i]
def __len__(self):
"""
Return the number of Heegner points.
EXAMPLES::
sage: len(heegner_points(389,-7,5))
12
When the conductor is 1 the length is a power of 2 (number of
square roots of `D` mod `4N` reduced mod `2N`) times the class
number::
sage: len(heegner_points(389,-20,1))
4
sage: QQ[sqrt(-20)].class_number()
2
"""
return len(self.points())
@cached_method
def betas(self):
"""
Return the square roots of `D c^2` modulo `4 N` all reduced
mod `2 N`, without multiplicity.
EXAMPLES::
sage: X = heegner_points(45,-11,1); X
All Heegner points of conductor 1 on X_0(45) associated to QQ[sqrt(-11)]
sage: [x.quadratic_form() for x in X]
[45*x^2 + 13*x*y + y^2,
45*x^2 + 23*x*y + 3*y^2,
45*x^2 + 67*x*y + 25*y^2,
45*x^2 + 77*x*y + 33*y^2]
sage: X.betas()
(13, 23, 67, 77)
sage: X.points(13)
(Heegner point 1/90*sqrt(-11) - 13/90 of discriminant -11 on X_0(45),)
sage: [x.quadratic_form() for x in X.points(13)]
[45*x^2 + 13*x*y + y^2]
"""
c = self.__c
D = self.discriminant()*c*c
N = self.level()
R = Integers(4*N)
m = 2*N
return tuple(sorted( set([a%m for a in R(D).sqrt(all=True)]) ))
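# By way of illustration (a sketch mirroring the doctest above, with
# N = 45, D = -11, c = 1), the betas arise as:
#     R = Integers(4*45)
#     sorted(set(b % 90 for b in R(-11).sqrt(all=True)))  # [13, 23, 67, 77]
# i.e. all square roots of D*c^2 modulo 4*N, folded modulo 2*N.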
@cached_method
def points(self, beta=None):
r"""
Return the Heegner points in ``self``. If `\beta` is given,
return only those Heegner points with given `\beta`, i.e.,
whose quadratic form has `B` congruent to `\beta` modulo `2 N`.
Use ``self.betas()`` to get a list of betas.
EXAMPLES::
sage: H = heegner_points(389,-7,5); H
All Heegner points of conductor 5 on X_0(389) associated to QQ[sqrt(-7)]
sage: H.points()
(Heegner point 5/778*sqrt(-7) - 147/778 of discriminant -7 and conductor 5 on X_0(389), ..., Heegner point 5/5446*sqrt(-7) - 757/778 of discriminant -7 and conductor 5 on X_0(389))
sage: H.betas()
(147, 631)
sage: [x.tau() for x in H.points(147)]
[5/778*sqrt_minus_7 - 147/778, 5/1556*sqrt_minus_7 - 147/1556, 5/1556*sqrt_minus_7 - 925/1556, 5/3112*sqrt_minus_7 - 1703/3112, 5/3112*sqrt_minus_7 - 2481/3112, 5/5446*sqrt_minus_7 - 21/778]
sage: [x.tau() for x in H.points(631)]
[5/778*sqrt_minus_7 - 631/778, 5/1556*sqrt_minus_7 - 631/1556, 5/1556*sqrt_minus_7 - 1409/1556, 5/3112*sqrt_minus_7 - 631/3112, 5/3112*sqrt_minus_7 - 1409/3112, 5/5446*sqrt_minus_7 - 757/778]
The result is cached and is a tuple (since it is immutable)::
sage: H.points() is H.points()
True
sage: type(H.points())
<... 'tuple'>
"""
if beta is None:
SDN = self.betas()
return tuple(sorted(sum([list(self.points(b)) for b in SDN], [])))
c = self.conductor()
N = self.level()
D = self.discriminant()
b = ZZ(beta) % (2*N)
disc = D*c*c
U = []
R = []
h = self.ring_class_field().degree_over_K()
a = 1
while len(U) < h:
if c.gcd(a) != 1:
a += 1
continue
# todo (optimize) -- replace for over all s with for over solution set
y = ZZ((b*b - disc)/(4*N))
for s in Integers(a):
if N*s*s + b*s + y == 0:
s = s.lift()
f = (a*N, b+2*N*s, ZZ( ((b + 2*N*s)**2 - disc)/(4*a*N)) )
g = BinaryQF(f).reduced_form()
assert g.discriminant() == disc
if g not in U:
U.append(g)
R.append(HeegnerPointOnX0N(N,D,c,f))
if len(U) >= h:
break
a += 1
return tuple(sorted(R))
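# A note on the enumeration above: each candidate form
#     f = (a*N, b + 2*N*s, ((b + 2*N*s)^2 - disc)/(4*a*N))
# has discriminant (b + 2*N*s)^2 - 4*(a*N)*C = disc = D*c^2 by
# construction, which is what the assertion on the reduced form g
# re-checks.  Distinct reduced forms give distinct Heegner points, and
# the loop stops once h = [K_c : K] of them have been found.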
def plot(self, *args, **kwds):
"""
Return a plot of the upper half plane representatives of all the
Heegner points in this set.
The inputs to this function get passed on to the ``point`` command.
EXAMPLES::
sage: heegner_points(389,-7,5).plot(pointsize=50, rgbcolor='red')
Graphics object consisting of 12 graphics primitives
sage: heegner_points(53,-7,15).plot(pointsize=50, rgbcolor='purple')
Graphics object consisting of 48 graphics primitives
"""
return sum(z.plot(*args, **kwds) for z in self)
class HeegnerPointOnX0N(HeegnerPoint):
r"""
A Heegner point as a point on the modular curve `X_0(N)`, which we
view as the upper half plane modulo the action of `\Gamma_0(N)`.
EXAMPLES::
sage: x = heegner_point(37,-7,5); x
Heegner point 5/74*sqrt(-7) - 11/74 of discriminant -7 and conductor 5 on X_0(37)
sage: type(x)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPointOnX0N'>
sage: x.level()
37
sage: x.conductor()
5
sage: x.discriminant()
-7
sage: x.quadratic_field()
Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
sage: x.quadratic_form()
37*x^2 + 11*x*y + 2*y^2
sage: x.quadratic_order()
Order in Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
sage: x.tau()
5/74*sqrt_minus_7 - 11/74
sage: loads(dumps(x)) == x
True
"""
def __init__(self, N, D, c=ZZ(1), f=None, check=True):
r"""
INPUT:
- `N` -- positive integer
- `D` -- fundamental discriminant, a negative integer
- `c` -- conductor, a positive integer coprime to `N`
- `f` -- binary quadratic form, 3-tuple `(A,B,C)` of coefficients
of `AX^2 + BXY + CY^2`, or element of the quadratic imaginary
field `\QQ(\sqrt{D})` in the upper half plane.
- ``check`` -- bool (default: ``True``); should not be set to
``False`` except internally.
EXAMPLES::
sage: x = heegner_point(389, -7, 5); x
Heegner point 5/778*sqrt(-7) - 147/778 of discriminant -7 and conductor 5 on X_0(389)
sage: type(x)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPointOnX0N'>
sage: sage.schemes.elliptic_curves.heegner.HeegnerPointOnX0N(389, -7, 5, None, check=False)
Heegner point 5/778*sqrt(-7) - 147/778 of discriminant -7 and conductor 5 on X_0(389)
"""
if check:
N = ZZ(N); D = ZZ(D); c = ZZ(c)
if c.gcd(N) != 1:
raise ValueError("conductor c (=%s) must be coprime to N (=%s)" % (c, N))
if not satisfies_weak_heegner_hypothesis(N, D):
raise ValueError("N (=%s) and D (=%s) must satisfy the Heegner hypothesis"%(N, D))
if f is not None:
if isinstance(f, tuple):
if len(f) != 3:
raise ValueError("if f is a tuple, it must have length 3")
f = tuple(ZZ(a) for a in f)
elif isinstance(f, BinaryQF):
# convert from BinaryQF
f = tuple(f)
elif sage.rings.number_field.number_field_element.is_NumberFieldElement(f):
# tau = number field element
g = f.minpoly()
if g.degree() != 2:
raise TypeError("number field element f must have degree 2")
g *= g.denominator() # make integral
f = (ZZ(g[2]), ZZ(g[1]), ZZ(g[0]))
else:
raise TypeError("f must be a 3-tuple, quadratic form, or element of the upper half plane")
A, B, C = f
if B*B - 4*A*C != D*c*c:
raise ValueError("f (=%s) must have discriminant %s"%(f, D*c*c))
HeegnerPoint.__init__(self, N, D, c)
if f is None:
# We know that N|A, so A = N is optimal.
A = N
B = ZZ(Integers(4*N)(D*c*c).sqrt(extend=False) % (2*N))
C = ZZ((B*B - D*c*c)/(4*A))
f = (A,B,C)
self.__f = f
def __hash__(self):
"""
The hash is obtained from the hash provided by :class:`HeegnerPoint`,
together with the reduced quadratic form.
EXAMPLES::
sage: x = heegner_point(37,-7,5)
sage: from sage.schemes.elliptic_curves.heegner import HeegnerPoint
sage: hash(x) == hash( (HeegnerPoint.__hash__(x), x.reduced_quadratic_form()) )
True
"""
return hash((HeegnerPoint.__hash__(self), self.reduced_quadratic_form()))
def __richcmp__(self, x, op):
"""
Compare two Heegner points.
EXAMPLES::
sage: x1 = EllipticCurve('389a').heegner_point(-7).heegner_point_on_X0N()
sage: x5 = EllipticCurve('389a').heegner_point(-7,5).heegner_point_on_X0N()
sage: x1 == x1
True
sage: x1 < x5
True
sage: x5 > x1
True
"""
if not isinstance(x, HeegnerPointOnX0N):
return NotImplemented
return richcmp((self.level(), self.discriminant(),
self.conductor(), self.__f),
(x.level(), x.discriminant(),
x.conductor(), x.__f), op)
def _repr_(self):
"""
Return string representation of this Heegner point.
EXAMPLES::
sage: x = heegner_point(37,-7,5); x._repr_()
'Heegner point 5/74*sqrt(-7) - 11/74 of discriminant -7 and conductor 5 on X_0(37)'
"""
c = self.conductor()
s = " and conductor %s"%c if c != 1 else ""
N = self.level()
D = self.discriminant()
tau = repr(self.tau()).replace('sqrt_minus_%s'%(-D),'sqrt(%s)'%D)
return "Heegner point %s of discriminant %s%s on X_0(%s)"%(tau, D, s, N)
def atkin_lehner_act(self, Q=None):
r"""
Given an integer `Q` dividing the level `N` such that `\gcd(Q, N/Q) = 1`,
return the image of this Heegner point under the Atkin-Lehner operator `W_Q`.
INPUT:
- `Q` -- positive divisor of `N`; if not given, default to `N`
EXAMPLES::
sage: x = heegner_point(389,-7,5)
sage: x.atkin_lehner_act()
Heegner point 5/199168*sqrt(-7) - 631/199168 of discriminant -7 and conductor 5 on X_0(389)
sage: x = heegner_point(45,D=-11,c=1); x
Heegner point 1/90*sqrt(-11) - 13/90 of discriminant -11 on X_0(45)
sage: x.atkin_lehner_act(5)
Heegner point 1/90*sqrt(-11) + 23/90 of discriminant -11 on X_0(45)
sage: y = x.atkin_lehner_act(9); y
Heegner point 1/90*sqrt(-11) - 23/90 of discriminant -11 on X_0(45)
sage: z = y.atkin_lehner_act(9); z
Heegner point 1/90*sqrt(-11) - 13/90 of discriminant -11 on X_0(45)
sage: z == x
True
"""
N = self.level()
if Q is None:
Q = N
if Q == 1:
return self # trivial special case
g, u, v = xgcd(Q * Q, -N)
if g != Q:
raise ValueError("Q must divide N and be coprime to N/Q")
tau = self.tau()
WQ_tau = ((u * Q * tau + v) / (N * tau + Q))
return HeegnerPointOnX0N(N, self.discriminant(), self.conductor(),
f=WQ_tau, check=True)
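# Sketch of the matrix behind the formula above (illustration only):
# with g, u, v = xgcd(Q^2, -N) and g = Q, the matrix
#     M = [u*Q  v]
#         [ N   Q]
# has determinant u*Q^2 - v*N = Q > 0, so M acts on the upper half
# plane by tau |-> (u*Q*tau + v)/(N*tau + Q), which is exactly WQ_tau.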
@cached_method
def quadratic_form(self):
"""
Return the integral primitive positive-definite binary
quadratic form associated to this Heegner point.
EXAMPLES::
sage: heegner_point(389,-7,5).quadratic_form()
389*x^2 + 147*x*y + 14*y^2
"""
# It is good/important that this return a copy, since BinaryQF's
# are stupidly mutable and cannot be made immutable.
# In particular, they have a stupid reduce method that changes
# them in place.
return BinaryQF(self.__f)
def reduced_quadratic_form(self):
"""
Return the reduced binary quadratic form corresponding to this Heegner point.
EXAMPLES::
sage: x = heegner_point(389,-7,5)
sage: x.quadratic_form()
389*x^2 + 147*x*y + 14*y^2
sage: x.reduced_quadratic_form()
4*x^2 - x*y + 11*y^2
"""
return self.quadratic_form().reduced_form()
@cached_method
def tau(self):
"""
Return an element ``tau`` in the upper half plane that corresponds
to this particular Heegner point.
Actually, ``tau`` is in the quadratic imaginary field K associated
to this Heegner point.
EXAMPLES::
sage: x = heegner_point(37,-7,5); tau = x.tau(); tau
5/74*sqrt_minus_7 - 11/74
sage: 37 * tau.minpoly()
37*x^2 + 11*x + 2
sage: x.quadratic_form()
37*x^2 + 11*x*y + 2*y^2
"""
K = self.quadratic_field()
d = K.gen() * self.conductor()
A, B, _ = self.__f
return (-B + d) / (2 * A)
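# Consistency check (algebra only, nothing is executed): with
# tau = (-B + c*sqrt(D))/(2*A) and (c*sqrt(D))^2 = D*c^2 = B^2 - 4*A*C,
# one gets A*tau^2 + B*tau + C = 0, matching the quadratic form
# A*x^2 + B*x*y + C*y^2 in the doctest above.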
def map_to_curve(self, E):
"""
Return the image of this Heegner point on the elliptic curve
`E`, which must also have conductor `N`, where `N` is the
level of ``self``.
EXAMPLES::
sage: x = heegner_point(389,-7,5); x
Heegner point 5/778*sqrt(-7) - 147/778 of discriminant -7 and conductor 5 on X_0(389)
sage: y = x.map_to_curve(EllipticCurve('389a')); y
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 389
sage: y.curve().cremona_label()
'389a1'
sage: y.heegner_point_on_X0N()
Heegner point 5/778*sqrt(-7) - 147/778 of discriminant -7 and conductor 5 on X_0(389)
You can also directly apply the modular parametrization of the elliptic curve::
sage: x = heegner_point(37,-7); x
Heegner point 1/74*sqrt(-7) - 17/74 of discriminant -7 on X_0(37)
sage: E = EllipticCurve('37a'); phi = E.modular_parametrization()
sage: phi(x)
Heegner point of discriminant -7 on elliptic curve of conductor 37
"""
return HeegnerPointOnEllipticCurve(E, self)
@cached_method
def galois_orbit_over_K(self):
r"""
Return the `Gal(K_c/K)`-orbit of this Heegner point.
EXAMPLES::
sage: x = heegner_point(389,-7,3); x
Heegner point 3/778*sqrt(-7) - 223/778 of discriminant -7 and conductor 3 on X_0(389)
sage: x.galois_orbit_over_K()
[Heegner point 3/778*sqrt(-7) - 223/778 of discriminant -7 and conductor 3 on X_0(389), Heegner point 3/1556*sqrt(-7) - 223/1556 of discriminant -7 and conductor 3 on X_0(389), Heegner point 3/1556*sqrt(-7) - 1001/1556 of discriminant -7 and conductor 3 on X_0(389), Heegner point 3/3112*sqrt(-7) - 223/3112 of discriminant -7 and conductor 3 on X_0(389)]
"""
c = self.conductor()
N = self.level()
D = self.discriminant()
b = self.__f[1] % (2*N) # B
disc = D*c*c
U = []
R = []
h = self.ring_class_field().degree_over_K()
a = 1
while len(U) < h:
if c.gcd(a) != 1:
a += 1
continue
# todo (optimize) -- replace for over all s with for over solution set
y = ZZ((b*b - disc)/(4*N))
for s in Integers(a):
if N*s*s + b*s + y == 0:
s = s.lift()
f = (a*N, b+2*N*s, ZZ( ((b + 2*N*s)**2 - disc)/(4*a*N)) )
g = BinaryQF(f).reduced_form()
assert g.discriminant() == disc
if g not in U:
U.append(g)
R.append(HeegnerPointOnX0N(N,D,c,f))
a += 1
return R
def plot(self, **kwds):
r"""
Draw a point at `(x,y)` where this Heegner point is
represented by the point `\tau = x + i y` in the upper half
plane.
The ``kwds`` get passed onto the point plotting command.
EXAMPLES::
sage: heegner_point(389,-7,1).plot(pointsize=50)
Graphics object consisting of 1 graphics primitive
"""
from sage.plot.all import point
return point(CDF(self.tau()), **kwds)
class HeegnerPointOnEllipticCurve(HeegnerPoint):
"""
A Heegner point on a curve associated to an order in a quadratic
imaginary field.
EXAMPLES::
sage: E = EllipticCurve('37a'); P = E.heegner_point(-7,5); P
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 37
sage: type(P)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPointOnEllipticCurve'>
"""
def __init__(self, E, x, check=True):
r"""
INPUT:
- `E` -- an elliptic curve over the rational numbers
- `x` -- Heegner point on `X_0(N)`
- ``check`` -- bool (default: ``True``); if ``True``, ensure that `D`,
`c` are of type Integer and define a Heegner point
on `E`
EXAMPLES::
sage: x = heegner_point(389,-7,5)
sage: E = EllipticCurve('389a')
sage: sage.schemes.elliptic_curves.heegner.HeegnerPointOnEllipticCurve(E, x)
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 389
"""
if check:
if E.conductor() != x.level():
raise ValueError("conductor of curve must equal level of Heegner point")
self.__E = E
self.__x = x
HeegnerPoint.__init__(self, x.level(), x.discriminant(), x.conductor())
@cached_method
def satisfies_kolyvagin_hypothesis(self, n=None):
r"""
Return ``True`` if this Heegner point and `n` satisfy the
Kolyvagin hypothesis, i.e., that each prime dividing the
conductor `c` of ``self`` is inert in K and coprime to `ND`.
Moreover, if `n` is not ``None``, also check that for each prime
`p` dividing `c` we have that `n | \gcd(a_p(E), p+1)`.
INPUT:
- `n` -- positive integer
EXAMPLES::
sage: EllipticCurve('389a').heegner_point(-7).satisfies_kolyvagin_hypothesis()
True
sage: EllipticCurve('389a').heegner_point(-7,5).satisfies_kolyvagin_hypothesis()
True
sage: EllipticCurve('389a').heegner_point(-7,11).satisfies_kolyvagin_hypothesis()
False
"""
if n is not None:
n = ZZ(n)
if n <= 0: raise ValueError("n must be a positive integer")
return is_kolyvagin_conductor(N=self.level(), E=self.__E, D=self.discriminant(),
r=None, n=n, c=self.conductor())
def __hash__(self):
"""
The hash value is obtained from the elliptic curve and the Heegner
point on `X_0(N)`.
EXAMPLES::
sage: x = EllipticCurve('389a').heegner_point(-7,5)
sage: hash(x) == hash( (x.curve(), x.heegner_point_on_X0N()) )
True
"""
return hash((self.__E, self.__x))
def __eq__(self, right):
"""
EXAMPLES::
sage: y1 = EllipticCurve('389a').heegner_point(-7)
sage: y5 = EllipticCurve('389a').heegner_point(-7,5)
sage: y1 == y1
True
sage: y5 == y5
True
sage: y1 == y5
False
sage: y1 == 10
False
"""
return isinstance(right, HeegnerPointOnEllipticCurve) and \
(self.__E, self.__x) == (right.__E, right.__x)
def __ne__(self, other):
"""
EXAMPLES::
sage: y1 = EllipticCurve('389a').heegner_point(-7)
sage: y5 = EllipticCurve('389a').heegner_point(-7,5)
sage: y1 != y1
False
sage: y5 != y5
False
sage: y1 != y5
True
sage: y1 != 10
True
"""
return not (self == other)
def _repr_(self):
"""
Return string representation of this Heegner point.
EXAMPLES::
sage: E = EllipticCurve('389a'); P = E.heegner_point(-7, 97)
sage: P._repr_()
'Heegner point of discriminant -7 and conductor 97 on elliptic curve of conductor 389'
"""
s = " and conductor %s"%self.conductor() if self.conductor() != 1 else ""
N = self.__E.conductor()
return "Heegner point of discriminant %s%s on elliptic curve of conductor %s"%(self.discriminant(), s, N)
def heegner_point_on_X0N(self):
r"""
Return Heegner point on `X_0(N)` that maps to this Heegner point on `E`.
EXAMPLES::
sage: E = EllipticCurve('37a'); P = E.heegner_point(-7,5); P
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 37
sage: P.heegner_point_on_X0N()
Heegner point 5/74*sqrt(-7) - 11/74 of discriminant -7 and conductor 5 on X_0(37)
"""
return self.__x
def map_to_complex_numbers(self, prec=53):
"""
Return the point in the subfield `M` of the complex numbers
(well defined only modulo the period lattice) corresponding to
this Heegner point.
EXAMPLES:
We compute a nonzero Heegner point over a ring class field on
a curve of rank 2::
sage: E = EllipticCurve('389a'); y = E.heegner_point(-7,5)
sage: y.map_to_complex_numbers()
1.49979679635196 + 0.369156204821526*I
sage: y.map_to_complex_numbers(100)
1.4997967963519640592142411892 + 0.36915620482152626830089145962*I
sage: y.map_to_complex_numbers(10)
1.5 + 0.37*I
Here we see that the Heegner point is 0 since it lies in the
lattice::
sage: E = EllipticCurve('389a'); y = E.heegner_point(-7)
sage: y.map_to_complex_numbers(10)
0.0034 - 3.9*I
sage: y.map_to_complex_numbers()
4.71844785465692e-15 - 3.94347540310330*I
sage: E.period_lattice().basis()
(2.49021256085505, 1.97173770155165*I)
sage: 2*E.period_lattice().basis()[1]
3.94347540310330*I
You can also directly coerce to the complex field::
sage: E = EllipticCurve('389a'); y = E.heegner_point(-7)
sage: z = ComplexField(100)(y); z # real part approx. 0
-... - 3.9434754031032964088448153963*I
sage: E.period_lattice().elliptic_exponential(z)
(0.00000000000000000000000000000 : 1.0000000000000000000000000000 : 0.00000000000000000000000000000)
"""
phi = self.__E.modular_parametrization()
tau = self.heegner_point_on_X0N().tau()
return phi.map_to_complex_numbers(tau, prec)
def _complex_mpfr_field_(self, C):
"""
Used internally for coercing Heegner point to a complex field.
EXAMPLES::
sage: E = EllipticCurve('37a'); y = E.heegner_point(-7)
sage: CC(y) # indirect doctest
0.929592715285395 - 1.22569469099340*I
sage: ComplexField(100)(y)
0.92959271528539567440519934446 - 1.2256946909933950304271124159*I
"""
phi = self.__E.modular_parametrization()
tau = C(self.heegner_point_on_X0N().tau())
return phi.map_to_complex_numbers(tau)
@cached_method
def kolyvagin_point(self):
"""
Return the Kolyvagin point corresponding to this Heegner
point.
This is the point obtained by applying the Kolyvagin
operator `J_c I_c` in the group ring of the Galois group to
this Heegner point. It is a point that defines an element
of `H^1(K, E[n])`, under certain hypotheses on `n`.
EXAMPLES::
sage: E = EllipticCurve('37a1'); y = E.heegner_point(-7); y
Heegner point of discriminant -7 on elliptic curve of conductor 37
sage: P = y.kolyvagin_point(); P
Kolyvagin point of discriminant -7 on elliptic curve of conductor 37
sage: P.numerical_approx() # abs tol 1e-15
(-3.36910401903861e-16 - 2.22076195576076e-16*I : 3.33066907387547e-16 + 2.22076195576075e-16*I : 1.00000000000000)
"""
return KolyvaginPoint(self)
@cached_method
def _trace_index(self, *args, **kwds):
"""
Return the index of the trace of this Heegner point down to `K`
in the group of `K`-rational points.
IMPORTANT: See the help for ``E = self.curve(); E.heegner_index?``
for the inputs to this function and more details about what is
computed. In particular, the returned index can be off by a
factor of `2`.
OUTPUT:
- an ``Integer``
EXAMPLES::
sage: E = EllipticCurve('77a1')
sage: P = E.heegner_point(-19); y = P._trace_numerical_conductor_1(); [c.real() for c in y]
[-1.2...e-16, -1.00000000000000, 1.00000000000000]
sage: -2*E.gens()[0]
(0 : -1 : 1)
sage: P._trace_index()
2
sage: P = E.heegner_point(-68); P
Heegner point of discriminant -68 on elliptic curve of conductor 77
sage: N(P)
(0.219223593595584 - 1.87443160153148*I : -1.34232921921325 - 1.52356748877889*I : 1.00000000000000)
sage: P._trace_index()
0
"""
if self.conductor() != 1:
raise ValueError("conductor of Heegner point must be 1")
i = self.__E.heegner_index(self.discriminant(), *args, **kwds)
lower = i.lower().round()
upper = i.upper().round()
if lower == upper:
return lower
# here we would say raise precision somehow.
raise NotImplementedError("unable to compute index")
def curve(self):
"""
Return the elliptic curve on which this is a Heegner point.
EXAMPLES::
sage: E = EllipticCurve('389a'); P = E.heegner_point(-7, 5)
sage: P.curve()
Elliptic Curve defined by y^2 + y = x^3 + x^2 - 2*x over Rational Field
sage: P.curve() is E
True
"""
return self.__E
@cached_method
def quadratic_form(self):
"""
Return the integral primitive positive definite binary
quadratic form associated to this Heegner point.
EXAMPLES::
sage: EllipticCurve('389a').heegner_point(-7, 5).quadratic_form()
389*x^2 + 147*x*y + 14*y^2
sage: P = EllipticCurve('389a').heegner_point(-7, 5, (778,925,275)); P
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 389
sage: P.quadratic_form()
778*x^2 + 925*x*y + 275*y^2
"""
return self.__x.quadratic_form()
@cached_method
def numerical_approx(self, prec=53, algorithm=None):
"""
Return a numerical approximation to this Heegner point
computed using a working precision of ``prec`` bits.
.. WARNING::
The answer is *not* provably correct to prec bits! A
priori, due to rounding and other errors, it is possible that
not a single digit is correct.
INPUT:
- ``prec`` -- (default: 53) the working precision in bits
EXAMPLES::
sage: E = EllipticCurve('37a'); P = E.heegner_point(-7); P
Heegner point of discriminant -7 on elliptic curve of conductor 37
sage: P.numerical_approx() # abs tol 1e-15
(-3.36910401903861e-16 - 2.22076195576076e-16*I : 3.33066907387547e-16 + 2.22076195576075e-16*I : 1.00000000000000)
sage: P.numerical_approx(10) # expect random digits
(0.0030 - 0.0028*I : -0.0030 + 0.0028*I : 1.0)
sage: P.numerical_approx(100)[0] # expect random digits
8.4...e-31 + 6.0...e-31*I
sage: E = EllipticCurve('37a'); P = E.heegner_point(-40); P
Heegner point of discriminant -40 on elliptic curve of conductor 37
sage: P.numerical_approx() # abs tol 1e-14
(-3.15940603400359e-16 + 1.41421356237309*I : 1.00000000000000 - 1.41421356237309*I : 1.00000000000000)
A rank 2 curve, where all Heegner points of conductor 1 are 0::
sage: E = EllipticCurve('389a'); E.rank()
2
sage: P = E.heegner_point(-7); P
Heegner point of discriminant -7 on elliptic curve of conductor 389
sage: P.numerical_approx()
(0.000000000000000 : 1.00000000000000 : 0.000000000000000)
However, Heegner points of bigger conductor are often nonzero::
sage: E = EllipticCurve('389a'); P = E.heegner_point(-7, 5); P
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 389
sage: numerical_approx(P)
(0.675507556926806 + 0.344749649302635*I : -0.377142931401887 + 0.843366227137146*I : 1.00000000000000)
sage: P.numerical_approx()
(0.6755075569268... + 0.3447496493026...*I : -0.3771429314018... + 0.8433662271371...*I : 1.00000000000000)
sage: E.heegner_point(-7, 11).numerical_approx()
(0.1795583794118... + 0.02035501750912...*I : -0.5573941377055... + 0.2738940831635...*I : 1.00000000000000)
sage: E.heegner_point(-7, 13).numerical_approx()
(1.034302915374... - 3.302744319777...*I : 1.323937875767... + 6.908264226850...*I : 1.00000000000000)
We find (probably) the defining polynomial of the
`x`-coordinate of `P`, which defines a class field. The shape of
the discriminant below is strong confirmation -- but not proof
-- that this polynomial is correct::
sage: f = P.numerical_approx(70)[0].algdep(6); f
1225*x^6 + 1750*x^5 - 21675*x^4 - 380*x^3 + 110180*x^2 - 129720*x + 48771
sage: f.discriminant().factor()
2^6 * 3^2 * 5^11 * 7^4 * 13^2 * 19^6 * 199^2 * 719^2 * 26161^2
"""
tau = ComplexField(prec)(self.tau())
E = self.curve()
return E.modular_parametrization()(tau)
def tau(self):
r"""
Return `\tau` in the upper half plane that maps via the
modular parametrization to this Heegner point.
EXAMPLES::
sage: E = EllipticCurve('389a'); P = E.heegner_point(-7, 5)
sage: P.tau()
5/778*sqrt_minus_7 - 147/778
"""
return self.heegner_point_on_X0N().tau()
@cached_method
def x_poly_exact(self, prec=53, algorithm='lll'):
"""
Return the irreducible polynomial over the rational numbers
satisfied by the `x`-coordinate of this Heegner point. A
``ValueError`` is raised if the precision is clearly insufficient
to determine a point on the curve.
.. WARNING::
It is in theory possible for this function to not raise a
ValueError, find a polynomial, but via some very unlikely
coincidence that point is not actually this Heegner point.
INPUT:
- ``prec`` -- integer (default: 53)
- ``algorithm`` -- 'conjugates' or 'lll' (default); if
'conjugates', compute numerically all the
conjugates ``y[i]`` of the Heegner point and construct
the characteristic polynomial as the product
`f(X)=\prod_i (X-y[i])`. If 'lll', compute only one of the
conjugates ``y[0]``, then use the LLL algorithm to
guess `f(X)`.
EXAMPLES:
We compute some `x`-coordinate polynomials of some conductor 1
Heegner points::
sage: E = EllipticCurve('37a')
sage: v = E.heegner_discriminants_list(10)
sage: [E.heegner_point(D).x_poly_exact() for D in v]
[x, x, x^2 + 2, x^5 - x^4 + x^3 + x^2 - 2*x + 1, x - 6, x^7 - 2*x^6 + 9*x^5 - 10*x^4 - x^3 + 8*x^2 - 5*x + 1, x^3 + 5*x^2 + 10*x + 4, x^4 - 10*x^3 + 10*x^2 + 12*x - 12, x^8 - 5*x^7 + 7*x^6 + 13*x^5 - 10*x^4 - 4*x^3 + x^2 - 5*x + 7, x^6 - 2*x^5 + 11*x^4 - 24*x^3 + 30*x^2 - 16*x + 4]
We compute `x`-coordinate polynomials for some Heegner points
of conductor bigger than 1 on a rank 2 curve::
sage: E = EllipticCurve('389a'); P = E.heegner_point(-7, 5); P
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 389
sage: P.x_poly_exact()
Traceback (most recent call last):
...
ValueError: insufficient precision to determine Heegner point (fails discriminant test)
sage: P.x_poly_exact(75)
x^6 + 10/7*x^5 - 867/49*x^4 - 76/245*x^3 + 3148/35*x^2 - 25944/245*x + 48771/1225
sage: E.heegner_point(-7,11).x_poly_exact(300)
x^10 + 282527/52441*x^9 + 27049007420/2750058481*x^8 - 22058564794/2750058481*x^7 - 140054237301/2750058481*x^6 + 696429998952/30250643291*x^5 + 2791387923058/30250643291*x^4 - 3148473886134/30250643291*x^3 + 1359454055022/30250643291*x^2 - 250620385365/30250643291*x + 181599685425/332757076201
Here we compute a Heegner point of conductor 5 on a rank 3 curve::
sage: E = EllipticCurve('5077a'); P = E.heegner_point(-7,5); P
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 5077
sage: P.x_poly_exact(300)
x^6 + 1108754853727159228/72351048803252547*x^5 + 88875505551184048168/1953478317687818769*x^4 - 2216200271166098662132/3255797196146364615*x^3 + 14941627504168839449851/9767391588439093845*x^2 - 3456417460183342963918/3255797196146364615*x + 1306572835857500500459/5426328660243941025
"""
n = self.ring_class_field().degree_over_K()
if algorithm == 'lll':
P = self.numerical_approx(prec)
g = None
for e in [1,2]: # is there a condition under which we should not bother trying e=1?
f = P[0].algdep(e*n)
# If f is correct, then disc(f) = m^2 * (a product of primes dividing D*c).
# To check this, we divide out the primes dividing D*c, then
# check that the resulting cofactor is a perfect square.
F = f.factor()
if len(F) == 1:
f = F[0][0]
if self._check_poly_discriminant(f):
g = f
break
if g is None:
raise ValueError("insufficient precision to determine Heegner point (fails discriminant test)")
f = g
f = f/f.leading_coefficient()
elif algorithm == 'conjugates':
raise NotImplementedError
return f
def _check_poly_discriminant(self, f):
"""
Return ``True`` if the prime-to-`Dc` part of the discriminant of
each factor of the polynomial `f` is plus or minus a square.
This is used for verifying that a polynomial is likely to
define a subfield of a specific ring class field.
INPUT:
- `f` -- a polynomial
EXAMPLES::
sage: E = EllipticCurve('389a'); P = E.heegner_point(-7, 5); P
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 389
sage: R.<x> = QQ[]
sage: P._check_poly_discriminant(x^2 - 5)
True
sage: P._check_poly_discriminant(x^2 - 19)
False
sage: P._check_poly_discriminant((x^2 - 19)*(x^2-5))
False
"""
if f.is_irreducible():
disc = f.discriminant()
(D, c) = (self.discriminant(), self.conductor())
for p in D.prime_divisors() + c.prime_divisors():
disc = disc // (p**disc.valuation(p))
if disc < 0: disc = -disc
return disc.is_square()
else:
for g,_ in f.factor():
if not self._check_poly_discriminant(g):
return False
return True
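# Worked instance of the test above (matching the doctests, where
# D = -7 and c = 5): for f = x^2 - 5 the discriminant is 20 = 2^2 * 5;
# removing the 5-part (5 divides D*c) leaves 4, a perfect square, so the
# test passes.  For f = x^2 - 19 the discriminant is 76 = 2^2 * 19, which
# has no factor of 5 or 7 to remove and is not a square, so it fails.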
def point_exact(self, prec=53, algorithm='lll', var='a', optimize=False):
"""
Return exact point on the elliptic curve over a number field
defined by computing this Heegner point to the given number of
bits of precision. A ``ValueError`` is raised if the precision
is clearly insufficient to determine a point on the curve.
.. WARNING::
It is in theory possible for this function to not raise a
ValueError, find a point on the curve, but via some very
unlikely coincidence that point is not actually this Heegner
point.
.. WARNING::
Currently we make an arbitrary choice of `y`-coordinate for
the lift of the `x`-coordinate.
INPUT:
- ``prec`` -- integer (default: 53)
- ``algorithm`` -- see the description of the algorithm
parameter for the ``x_poly_exact`` method.
- ``var`` -- string (default: 'a')
- ``optimize`` -- bool (default: ``False``); if ``True``, try to
optimize the defining polynomial for the number field that
the point is defined over. Off by default, since this
can be very expensive.
EXAMPLES::
sage: E = EllipticCurve('389a'); P = E.heegner_point(-7, 5); P
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 389
sage: z = P.point_exact(100, optimize=True)
sage: z[1].charpoly()
x^12 + 6*x^11 + 90089/1715*x^10 + 71224/343*x^9 + 52563964/588245*x^8 - 483814934/588245*x^7 - 156744579/16807*x^6 - 2041518032/84035*x^5 + 1259355443184/14706125*x^4 + 3094420220918/14706125*x^3 + 123060442043827/367653125*x^2 + 82963044474852/367653125*x + 211679465261391/1838265625
sage: f = P.numerical_approx(500)[1].algdep(12); f / f.leading_coefficient()
x^12 + 6*x^11 + 90089/1715*x^10 + 71224/343*x^9 + 52563964/588245*x^8 - 483814934/588245*x^7 - 156744579/16807*x^6 - 2041518032/84035*x^5 + 1259355443184/14706125*x^4 + 3094420220918/14706125*x^3 + 123060442043827/367653125*x^2 + 82963044474852/367653125*x + 211679465261391/1838265625
sage: E = EllipticCurve('5077a')
sage: P = E.heegner_point(-7)
sage: P.point_exact(prec=100)
(0 : 1 : 0)
"""
E = self.__E
if self.numerical_approx(prec)[-1] == 0:
return E(0)
f = self.x_poly_exact(prec, algorithm=algorithm)
if f.degree() == 1:
v = E.lift_x(-f[0], all=True)
if v:
return v[0]
g, d = make_monic(f)
K = rings.NumberField(g, var)
x = K.gen() / d
if optimize:
KO, from_KO, to_KO = K.optimized_representation()
K = KO
x = to_KO(x)
if K.degree() < 2 * self.ring_class_field().degree_over_K():
M = rings.QuadraticField(self.discriminant(),'b')
KD = K.composite_fields(M, names='a')[0]
phi = K.embeddings(KD)[0]
x = phi(x)
K = KD.change_names(names=var)
x = K.structure()[1](x)
a1,a2,a3,a4,a6 = E.a_invariants()
R = K['Y']; Y = R.gen()
g = Y**2 + a1*x*Y + a3*Y - (x**3 + a2*x**2 + a4*x + a6)
F = g.factor() # this takes a long time
if len(F) == 1 and F[0][1] == 2:
# reducible -- 1 factor squared; its root is the y-coordinate
y = -F[0][0][0]
L = K
elif len(F) == 2:
# reducible -- 2 factors
y0 = -F[0][0][0]
# y1 = -F[1][0][0]
# Figure out which of y0 or y1 is right by
# P = self.numerical_approx(prec)
# TODO: finish this -- have to do some thing numerical
y = y0
L = K
else:
# TODO -- is there an issue with choice of root?
# irreducible
gg, dd = make_monic(g)
M = K.extension(gg, names='b')
y = M.gen()/dd
x = M(x)
L = M.absolute_field(names = var)
phi = L.structure()[1]
x = phi(x)
y = phi(y)
EL = E.change_ring(L)
P = EL.point((x,y,1), check=False)
return P
@cached_method
def conjugates_over_K(self):
r"""
Return the `Gal(K_c/K)` conjugates of this Heegner point.
EXAMPLES::
sage: E = EllipticCurve('77a')
sage: y = E.heegner_point(-52,5); y
Heegner point of discriminant -52 and conductor 5 on elliptic curve of conductor 77
sage: print([z.quadratic_form() for z in y.conjugates_over_K()])
[77*x^2 + 52*x*y + 13*y^2, 154*x^2 + 206*x*y + 71*y^2, 539*x^2 + 822*x*y + 314*y^2, 847*x^2 + 1284*x*y + 487*y^2, 1001*x^2 + 52*x*y + y^2, 1078*x^2 + 822*x*y + 157*y^2, 1309*x^2 + 360*x*y + 25*y^2, 1309*x^2 + 2054*x*y + 806*y^2, 1463*x^2 + 976*x*y + 163*y^2, 2233*x^2 + 2824*x*y + 893*y^2, 2387*x^2 + 2054*x*y + 442*y^2, 3619*x^2 + 3286*x*y + 746*y^2]
sage: y.quadratic_form()
77*x^2 + 52*x*y + 13*y^2
"""
H = heegner_points(self.level(), self.discriminant(), self.conductor())
E = self.curve()
beta = self.quadratic_form()[1]
return tuple([z.map_to_curve(E) for z in H.points(beta)])
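# The conjugates are exactly the Heegner points of the same level,
# discriminant and conductor whose quadratic forms have middle
# coefficient congruent to this point's beta modulo 2*N (see the
# points() method of HeegnerPoints_level_disc_cond), each mapped to E.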
def _numerical_approx_conjugates_over_QQ(self, prec=53):
"""
Return a list v of the numerical approximations to precision
prec of the conjugates of this Heegner point, and their
complex conjugates.
INPUT:
- ``prec`` -- positive integer (default: 53)
EXAMPLES::
sage: E = EllipticCurve('37a')
sage: y = E.heegner_point(-7,3); y
Heegner point of discriminant -7 and conductor 3 on elliptic curve of conductor 37
sage: y._numerical_approx_conjugates_over_QQ()
[(-1.89564392373896 - 0.444771808762067*I : -1.50000000000000 + 2.13102976222246*I : 1.00000000000000), ...]
sage: y._numerical_approx_conjugates_over_QQ(prec=10)
[(-1.9 - 0.44*I : -1.5 + 2.1*I : 1.0), ...
(1.4 + 0.0024*I : -1.7 - 0.0046*I : 1.0)]
"""
v = []
for z in self.conjugates_over_K():
m = z.numerical_approx(prec)
v.append(m)
v.append(m.curve().point([w.conjugate() for w in m], check=False))
v.sort()
return v
def _numerical_approx_xy_poly(self, prec=53):
r"""
Return polynomials with real floating point coefficients obtained
by taking the real part of the product of `X - \alpha` over
the numerical approximations `\alpha` to the conjugates of
this Heegner point. The first polynomial runs through the
`x`-coordinates and the second through the `y`-coordinates.
INPUT:
- ``prec`` -- positive integer (default: 53)
OUTPUT:
- 2-tuple of polynomials with floating point coefficients
EXAMPLES::
sage: E = EllipticCurve('37a')
sage: y = E.heegner_point(-7,3); y
Heegner point of discriminant -7 and conductor 3 on elliptic curve of conductor 37
sage: y._numerical_approx_xy_poly() # rel tol 1e-14
(X^8 + 6.00000000000000*X^7 + 8.99999999999998*X^6 - 12.0000000000000*X^5 - 42.0000000000000*X^4 - 17.9999999999999*X^3 + 36.0000000000001*X^2 + 35.9999999999999*X + 8.99999999999995, X^8 + 12.0000000000000*X^7 + 72.0000000000000*X^6 + 270.000000000000*X^5 + 678.000000000001*X^4 + 1152.00000000000*X^3 + 1269.00000000000*X^2 + 810.000000000002*X + 225.000000000001)
"""
v = self._numerical_approx_conjugates_over_QQ(prec)
R = ComplexField(prec)['X']
S = RealField(prec)['X']
X = R.gen()
fx = prod(X-a[0] for a in v)
fx = S([b.real() for b in fx])
fy = prod(X-c[1] for c in v)
fy = S([d.real() for d in fy])
return fx, fy
def _xy_poly_nearby(self, prec=53, max_error=10**(-10)):
"""
Return polynomials with rational coefficients that, for sufficiently
tight bounds, are the characteristic polynomials of the `x` and `y`
coordinates of this Heegner point.
INPUT:
- ``prec`` -- positive integer (default: 53)
- ``max_error`` -- very small floating point number
OUTPUT:
- 2-tuple of polynomials with rational coefficients
EXAMPLES::
sage: E = EllipticCurve('37a')
sage: y = E.heegner_point(-7,3); y
Heegner point of discriminant -7 and conductor 3 on elliptic curve of conductor 37
sage: y._xy_poly_nearby()
[X^8 + 6*X^7 + 9*X^6 - 12*X^5 - 42*X^4 - 18*X^3 + 36*X^2 + 36*X + 9,
X^8 + 12*X^7 + 72*X^6 + 270*X^5 + 678*X^4 + 1152*X^3 + 1269*X^2 + 810*X + 225]
"""
v = self._numerical_approx_xy_poly(prec)
return [nearby_rational_poly(g, max_error=max_error) for g in v]
def _xy_poly_simplest(self, prec=53, prec2=None):
"""
Return polynomials with rational coefficients that, for
sufficiently tight bounds, are the characteristic polynomials of
the `x` and `y` coordinates of this Heegner point.
INPUT:
- ``prec`` -- positive integer (default: 53)
- ``prec2`` -- passed into simplest_rational_poly function
EXAMPLES::
sage: E = EllipticCurve('37a'); y = E.heegner_point(-7,3)
sage: y._xy_poly_simplest()
[X^8 + 6*X^7 + 9*X^6 - 12*X^5 - 42*X^4 - 18*X^3 + 36*X^2 + 36*X + 9,
X^8 + 12*X^7 + 72*X^6 + 270*X^5 + 678*X^4 + 1152*X^3 + 1269*X^2 + 810*X + 225]
"""
v = self._numerical_approx_xy_poly(prec)
if prec2 is None: prec2 = max(2, prec - 20)
return [simplest_rational_poly(g,prec2) for g in v]
@cached_method
def _square_roots_mod_2N_of_D_mod_4N(self):
"""
Return the square roots of `D` modulo `4N` all reduced mod `2N`,
without multiplicity.
EXAMPLES::
sage: E = EllipticCurve('37a'); P = E.heegner_point(-40); P
Heegner point of discriminant -40 on elliptic curve of conductor 37
sage: P._square_roots_mod_2N_of_D_mod_4N()
[16, 58]
sage: parent(P._square_roots_mod_2N_of_D_mod_4N()[0])
Ring of integers modulo 74
"""
N = self.__E.conductor()
R = Integers(4*N)
m = 2*N
return sorted( set([a%m for a in R(self.discriminant()).sqrt(all=True)]) )
def _trace_numerical_conductor_1(self, prec=53):
"""
Return numerical approximation using ``prec`` terms of working
precision to the trace down to the quadratic imaginary field
`K` of this Heegner point.
INPUT:
- `prec` -- bits precision (default: 53)
EXAMPLES::
sage: E = EllipticCurve('57a1')
sage: P = E.heegner_point(-8); P
Heegner point of discriminant -8 on elliptic curve of conductor 57
sage: P._trace_numerical_conductor_1() # approx. (1 : 0 : 1)
(1.00000000000000 + ...e-16*I : ...e-16 - ...e-16*I : 1.00000000000000)
sage: P = E(2,1) # a generator
sage: E([1,0]).height()
0.150298370947295
sage: P.height()
0.0375745927368238
sage: E.heegner_index(-8)
2.0000?
sage: E.torsion_order()
1
sage: 2*P
(1 : 0 : 1)
"""
if self.conductor() != 1:
raise ValueError("conductor must be 1")
R, U = self._good_tau_representatives()
E = self.__E
phi = E.modular_parametrization()
C = rings.ComplexField(prec)
F = E.change_ring(C)
s = 0
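# Each entry (u, weight) of U comes from _good_tau_representatives:
# |weight| == 2 means u stands for a pair g != gbar of forms, so the
# point is counted together with its complex conjugate below, while
# |weight| == 1 means g == gbar and the point is counted once.  The
# sign of the weight is epsilon_Q, a product of local root numbers.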
for u, weight in U:
P = phi(C(self._qf_to_tau(u)))
z = F.point(list(P),check=False)
if abs(weight) == 2:
t = F.point(z,check=False) + F.point(tuple([x.conjugate() for x in z]), check=False)
if weight < 0:
s -= t
else:
s += t
else:
if weight < 0:
s -= z
else:
s += z
return s
@cached_method
def _good_tau_representatives(self):
"""
Return good upper half plane representatives for Heegner points.
ALGORITHM: This is Algorithm 3.5 in Watkins's paper.
EXAMPLES::
sage: P = EllipticCurve('389a1').heegner_point(-7)
sage: P._good_tau_representatives()
([(1, 1, 2)], [((389, 185, 22), 1)])
"""
if self.conductor() != 1: raise NotImplementedError
E = self.__E
SDN = self._square_roots_mod_2N_of_D_mod_4N()
beta = SDN[0]
U = []
R = []
N = self.__E.conductor()
D = self.discriminant()
h = self.ring_class_field().degree_over_K()
divs = D.gcd(N).divisors()
a = 1
while True:
for b in SDN:
b = b.lift()
# todo (optimize) -- replace for over all s with for over solution
# set that can be found quickly.
y = ZZ((b*b - D)/(4*N))
for s in Integers(a):
if N*s*s + b*s + y == 0:
s = s.lift()
f = (a*N, b+2*N*s, ZZ( ((b + 2*N*s)**2 - D)/(4*a*N)) )
for d in divs:
Q = d * prod(p**k for p,k in N.factor() if (b-beta)%(p**k)!=0)
g = self._qf_atkin_lehner_act(Q, f)
gbar = (ZZ(g[0]/N), -g[1], g[2]*N)
g = self._qf_reduce(g)
gbar = self._qf_reduce(gbar)
if g in R or gbar in R:
continue
R.append(g)
if g != gbar:
R.append(gbar)
epsilon_Q = prod([E.root_number(q) for q in Q.prime_divisors()])
if g == gbar:
# weight is epsilon_Q
weight = epsilon_Q
else:
# weight is 2*epsilon_Q
weight = 2*epsilon_Q
U.append((f,weight))
if len(R) == h:
return R, U
assert len(R) < h, "bug -- too many quadratic forms"
a += 1
def _qf_to_tau(self, f):
r"""
Internal function that, given a quadratic form
`f=(A,B,C)`, returns `\tau` in the upper half plane with
`A\tau^2 + B \tau + C = 0`. Here `A>0` and `\gcd(A,B,C)=1`.
Also, `\tau` has discriminant `D=B^2-4AC`. In fact, `\tau =
(-B + \sqrt{D})/(2A)`.
INPUT:
- `f` -- binary quadratic form
EXAMPLES::
sage: P = EllipticCurve('57a1').heegner_point(-8)
sage: R, U = P._good_tau_representatives()
sage: f = U[0][0]; f
(57, 26, 3)
sage: P._qf_to_tau(f)
1/114*sqrt_minus_8 - 13/57
"""
c = self.conductor()
A,B,_ = f
alpha = c * self.quadratic_field().gen() # this is sqrt(D) = sqrt(c^2*disc(K))
return (-B + alpha)/(2*A)
def _qf_from_tau(self, tau):
r"""
Return quadratic form associated to a given `\tau` in the upper
half plane.
INPUT:
- `\tau` -- quadratic element of the upper half plane
EXAMPLES::
sage: P = EllipticCurve('57a1').heegner_point(-8)
sage: R, U = P._good_tau_representatives()
sage: f = U[0][0]; f
(57, 26, 3)
sage: tau = P._qf_to_tau(f); tau
1/114*sqrt_minus_8 - 13/57
sage: P._qf_from_tau(tau)
(57, 26, 3)
"""
g = tau.minpoly()
g *= g.denominator()
return (ZZ(g[2]), ZZ(g[1]), ZZ(g[0]))
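# As the doctest shows, _qf_from_tau inverts _qf_to_tau: for a primitive
# form f = (A, B, C) with A > 0, clearing denominators in the minimal
# polynomial of tau = (-B + c*sqrt(D))/(2*A) recovers the coefficient
# tuple (A, B, C).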
def _qf_atkin_lehner_act(self, Q, f):
r"""
Given a positive integer `Q` with `Q | N` and `\gcd(Q, N/Q) =
1`, we compute the quadratic form corresponding to the image
of the `tau` corresponding to `f` under the Atkin-Lehner
operator `W_Q`.
We do this by letting `u,v` be integers such that
`u Q^2 - v N = Q`, and using that `W_Q` sends `\tau`
to `( (u Q \tau + v) / (N \tau + Q) ) / Q`.
INPUT:
- `Q` -- integer that divides the level `N`
- `f` -- quadratic form
OUTPUT:
- quadratic form
EXAMPLES::
sage: P = EllipticCurve('57a1').heegner_point(-8)
sage: R, U = P._good_tau_representatives()
sage: f = U[0][0]; f
(57, 26, 3)
sage: P._qf_atkin_lehner_act(3, f)
(1938, 1204, 187)
sage: g = P._qf_atkin_lehner_act(19, f); g
(114, -64, 9)
sage: h = P._qf_atkin_lehner_act(19, g); h
(7353, -4762, 771)
sage: BinaryQF(f).reduced_form() == BinaryQF(h).reduced_form()
True
"""
N = self.__E.conductor()
g, u, v = xgcd(Q*Q, -N)
assert g == Q
tau = self._qf_to_tau(f)
tau2 = ((u*Q*tau + v) / (N*tau + Q))
return self._qf_from_tau(tau2)
def _qf_reduce(self, f):
"""
Given a binary quadratic form `f` represented as a 3-tuple
(A,B,C), return the reduced binary quadratic form equivalent
to `f`, represented in the same way.
EXAMPLES::
sage: P = EllipticCurve('57a1').heegner_point(-8)
sage: R, U = P._good_tau_representatives()
sage: f = U[0][0]; f
(57, 26, 3)
sage: P._qf_reduce(f)
(1, 0, 2)
"""
return tuple(BinaryQF(f).reduced_form())
def kolyvagin_cohomology_class(self, n=None):
"""
Return the Kolyvagin class associated to this Heegner point.
INPUT:
- `n` -- positive integer that divides the gcd of `a_p`
and `p+1` for all `p` dividing the conductor. If `n` is
``None``, choose the largest valid `n`.
EXAMPLES::
sage: y = EllipticCurve('389a').heegner_point(-7,5)
sage: y.kolyvagin_cohomology_class(3)
Kolyvagin cohomology class c(5) in H^1(K,E[3])
"""
return KolyvaginCohomologyClassEn(self.kolyvagin_point(), n)
#########################################################################################
# Kolyvagin Points P_c
#########################################################################################
class KolyvaginPoint(HeegnerPoint):
"""
A Kolyvagin point.
EXAMPLES:
We create a few Kolyvagin points::
sage: EllipticCurve('11a1').kolyvagin_point(-7)
Kolyvagin point of discriminant -7 on elliptic curve of conductor 11
sage: EllipticCurve('37a1').kolyvagin_point(-7)
Kolyvagin point of discriminant -7 on elliptic curve of conductor 37
sage: EllipticCurve('37a1').kolyvagin_point(-67)
Kolyvagin point of discriminant -67 on elliptic curve of conductor 37
sage: EllipticCurve('389a1').kolyvagin_point(-7, 5)
Kolyvagin point of discriminant -7 and conductor 5 on elliptic curve of conductor 389
One can also associate a Kolyvagin point to a Heegner point::
sage: y = EllipticCurve('37a1').heegner_point(-7); y
Heegner point of discriminant -7 on elliptic curve of conductor 37
sage: y.kolyvagin_point()
Kolyvagin point of discriminant -7 on elliptic curve of conductor 37
TESTS::
sage: y = EllipticCurve('37a1').heegner_point(-7)
sage: type(y)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerPointOnEllipticCurve'>
sage: loads(dumps(y)) == y
True
"""
def __init__(self, heegner_point):
"""
Create a Kolyvagin point.
INPUT:
- ``heegner_point`` -- a Heegner point on some elliptic curve
EXAMPLES:
We directly construct a Kolyvagin point from the KolyvaginPoint class::
sage: y = EllipticCurve('37a1').heegner_point(-7)
sage: sage.schemes.elliptic_curves.heegner.KolyvaginPoint(y)
Kolyvagin point of discriminant -7 on elliptic curve of conductor 37
"""
if not heegner_point.satisfies_kolyvagin_hypothesis():
raise ValueError("Heegner point does not satisfy Kolyvagin hypothesis")
self.__heegner_point = heegner_point
HeegnerPoint.__init__(self, heegner_point.level(), heegner_point.discriminant(),
heegner_point.conductor())
def satisfies_kolyvagin_hypothesis(self, n=None):
r"""
Return ``True`` if this Kolyvagin point satisfies the Kolyvagin
hypothesis for `n`, so that it defines a Galois equivariant
element of `E(K_c)/n E(K_c)`.
EXAMPLES::
sage: y = EllipticCurve('389a').heegner_point(-7,5); P = y.kolyvagin_point()
sage: P.kolyvagin_cohomology_class(3)
Kolyvagin cohomology class c(5) in H^1(K,E[3])
sage: P.satisfies_kolyvagin_hypothesis(3)
True
sage: P.satisfies_kolyvagin_hypothesis(5)
False
sage: P.satisfies_kolyvagin_hypothesis(7)
False
sage: P.satisfies_kolyvagin_hypothesis(11)
False
"""
return self.__heegner_point.satisfies_kolyvagin_hypothesis(n)
def curve(self):
r"""
Return the elliptic curve over `\QQ` on which this Kolyvagin
point sits.
EXAMPLES::
sage: E = EllipticCurve('37a1'); P = E.kolyvagin_point(-67, 3)
sage: P.curve()
Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field
"""
return self.__heegner_point.curve()
def heegner_point(self):
"""
This Kolyvagin point `P_c` is associated to some Heegner point
`y_c` via Kolyvagin's construction. This function returns that
point `y_c`.
EXAMPLES::
sage: E = EllipticCurve('37a1')
sage: P = E.kolyvagin_point(-67); P
Kolyvagin point of discriminant -67 on elliptic curve of conductor 37
sage: y = P.heegner_point(); y
Heegner point of discriminant -67 on elliptic curve of conductor 37
sage: y.kolyvagin_point() is P
True
"""
return self.__heegner_point
def _repr_(self):
"""
Return string representation of this Kolyvagin point.
EXAMPLES::
sage: E = EllipticCurve('37a1'); P = E.kolyvagin_point(-67,7); P._repr_()
'Kolyvagin point of discriminant -67 and conductor 7 on elliptic curve of conductor 37'
"""
s = repr(self.__heegner_point)
return s.replace('Heegner','Kolyvagin')
def index(self, *args, **kwds):
"""
Return the index of this Kolyvagin point in the full group of
`K_c`-rational points on `E`.
When the conductor is 1, this is computed numerically using
the Gross-Zagier formula and explicit point search, and it may
be off by a factor of `2`. See the documentation for ``E.heegner_index``,
where `E` is the curve attached to ``self``.
EXAMPLES::
sage: E = EllipticCurve('37a1'); P = E.kolyvagin_point(-67); P.index()
6
"""
if self.conductor() == 1:
return self.__heegner_point._trace_index(*args, **kwds)
raise NotImplementedError
def numerical_approx(self, prec=53):
"""
Return a numerical approximation to this Kolyvagin point using
``prec`` bits of working precision.
INPUT:
- ``prec`` -- precision in bits (default: 53)
EXAMPLES::
sage: P = EllipticCurve('37a1').kolyvagin_point(-7); P
Kolyvagin point of discriminant -7 on elliptic curve of conductor 37
sage: P.numerical_approx() # approx. (0 : 0 : 1)
(...e-16 - ...e-16*I : ...e-16 + ...e-16*I : 1.00000000000000)
sage: P.numerical_approx(100)[0].abs() < 2.0^-99
True
sage: P = EllipticCurve('389a1').kolyvagin_point(-7, 5); P
Kolyvagin point of discriminant -7 and conductor 5 on elliptic curve of conductor 389
Numerical approximation is only implemented for points of conductor 1::
sage: P.numerical_approx()
Traceback (most recent call last):
...
NotImplementedError
"""
if self.conductor() == 1:
return self.__heegner_point._trace_numerical_conductor_1(prec)
raise NotImplementedError
def point_exact(self, prec=53):
"""
INPUT:
- ``prec`` -- precision in bits (default: 53)
EXAMPLES:
A rank 1 curve::
sage: E = EllipticCurve('37a1'); P = E.kolyvagin_point(-67)
sage: P.point_exact()
(6 : -15 : 1)
sage: P.point_exact(40)
(6 : -15 : 1)
sage: P.point_exact(20)
Traceback (most recent call last):
...
RuntimeError: insufficient precision to find exact point
A rank 0 curve::
sage: E = EllipticCurve('11a1'); P = E.kolyvagin_point(-7)
sage: P.point_exact()
(-1/2*sqrt_minus_7 + 1/2 : -2*sqrt_minus_7 - 2 : 1)
A rank 2 curve::
sage: E = EllipticCurve('389a1'); P = E.kolyvagin_point(-7)
sage: P.point_exact()
(0 : 1 : 0)
"""
if self.conductor() == 1:
# the result is a point defined over K in the conductor 1 case, which is easier.
P = self.numerical_approx(prec)
E = self.curve()
if P[2] == 0:
return E(0)
if E.root_number() == -1:
return self._recognize_point_over_QQ(P, 2*self.index())
else:
# root number +1. We use algdep to recognize the x
# coordinate, stick it in the appropriate quadratic
# field, then make sure that we got the right
# embedding, and if not fix things so we do.
x = P[0]
C = x.parent()
f = x.algdep(2)
K = self.quadratic_field()
roots = [r[0] for r in f.roots(K)]
if not roots:
raise RuntimeError("insufficient precision to find exact point")
if len(roots) == 1:
X = roots[0]
else:
d = [abs(C(r) - x) for r in roots]
if d[0] == d[1]:
raise RuntimeError("insufficient precision to distinguish roots")
if d[0] < d[1]:
X = roots[0]
else:
X = roots[1]
F = E.change_ring(K)
Q = F.lift_x(X, all=True)
if len(Q) == 1:
return Q[0]
if not Q:
raise RuntimeError("insufficient precision")
y = P[1]
d = [abs(C(r[1])-y) for r in Q]
if d[0] == d[1]:
raise RuntimeError("insufficient precision to distinguish roots")
if d[0] < d[1]:
return Q[0]
else:
return Q[1]
else:
raise NotImplementedError
def plot(self, prec=53, *args, **kwds):
r"""
Plot a Kolyvagin point `P_1` if it is defined over the
rational numbers.
EXAMPLES::
sage: E = EllipticCurve('37a'); P = E.heegner_point(-11).kolyvagin_point()
sage: P.plot(prec=30, pointsize=50, rgbcolor='red') + E.plot()
Graphics object consisting of 3 graphics primitives
"""
if self.conductor() != 1:
raise NotImplementedError
E = self.curve()
if E.root_number() == -1:
P = self.numerical_approx(prec=prec)
from sage.plot.all import point, Graphics
if not P:
# point at infinity
return Graphics()
return point((P[0].real(), P[1].real()), *args, **kwds)
else:
raise NotImplementedError
@cached_method
def trace_to_real_numerical(self, prec=53):
"""
Return the trace of this Kolyvagin point down to the real
numbers, computed numerically using ``prec`` bits of working
precision.
EXAMPLES::
sage: E = EllipticCurve('37a1'); P = E.kolyvagin_point(-67)
sage: PP = P.numerical_approx()
sage: [c.real() for c in PP]
[6.00000000000000, -15.0000000000000, 1.00000000000000]
sage: all(c.imag().abs() < 1e-14 for c in PP)
True
sage: P.trace_to_real_numerical()
(1.61355529131986 : -2.18446840788880 : 1.00000000000000)
sage: P.trace_to_real_numerical(prec=80) # abs tol 1e-21
(1.6135552913198573127230 : -2.1844684078888023289187 : 1.0000000000000000000000)
"""
# Compute numerical approximation of P in E(K).
P = self.numerical_approx(prec=prec)
# Trace this numerical approximation down to E(Q) (numerically).
E = P.curve()
if self.curve().root_number() == -1:
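# With root number -1, complex conjugation fixes the Heegner
# point up to torsion, so numerically the trace is just 2*P.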
R = 2*P
else:
R = P + E.point([x.conjugate() for x in P],check=False)
F = self.curve().change_ring(rings.RealField(prec))
return F.point([x.real() for x in R], check=False)
@cached_method
def _trace_exact_conductor_1(self, prec=53):
r"""
Return the trace from `K` to `\QQ` of this Kolyvagin point in
the case of conductor 1, computed numerically using ``prec``
bits of precision and then recognized exactly (e.g., via
continued fractions). If the precision is not enough to
determine a point on the curve, a ``RuntimeError`` is raised.
Even if the precision determines a point, there is no guarantee
that it is correct.
EXAMPLES:
A Kolyvagin point on a rank 1 curve::
sage: E = EllipticCurve('37a1'); P = E.kolyvagin_point(-67)
sage: P.trace_to_real_numerical()
(1.61355529131986 : -2.18446840788880 : 1.00000000000000)
sage: P._trace_exact_conductor_1() # the actual point we're reducing
(1357/841 : -53277/24389 : 1)
sage: (P._trace_exact_conductor_1().height() / E.regulator()).sqrt()
12.0000000000000
"""
if not self.conductor() == 1:
raise ValueError("the conductor must be 1")
P = self.trace_to_real_numerical(prec)
return self._recognize_point_over_QQ(P, 2*self.index())
def _recognize_point_over_QQ(self, P, n):
r"""
Used internally when computing an exact point on an elliptic curve.
INPUT:
- `P` -- numerical approximation for a point on `E`
- `n` -- upper bound on divisibility index of `P` in group `E(\QQ)`
EXAMPLES::
sage: E = EllipticCurve('43a'); P = E.heegner_point(-20).kolyvagin_point()
sage: PP = P.numerical_approx(); PP
(...e-16 : -1.00000000000000 : 1.00000000000000)
sage: P._recognize_point_over_QQ(PP, 4)
(0 : -1 : 1)
"""
# Here is where we *should* implement the "method of Cremona
# etc" mentioned in Watkins' article... which involves local
# heights.
E = self.curve() # over Q
v = sum([list(n*w) for w in E.gens()] + [list(w) for w in E.torsion_points()], [])
# note -- we do not claim to prove anything, so making up a factor of 100 is fine.
max_denominator = 100*max([z.denominator() for z in v])
try:
# the coercion below also checks if point is on elliptic curve
return E([x.real().nearby_rational(max_denominator=max_denominator) for x in P])
except TypeError:
raise RuntimeError("insufficient precision to find exact point")
def mod(self, p, prec=53):
r"""
Return the trace of the reduction `Q` modulo a prime over `p` of this
Kolyvagin point as an element of `E(\GF{p})`, where
`p` is any prime that is inert in `K` and coprime to `NDc`.
The point `Q` is only well defined up to an element of
`(p+1) E(\GF{p})`, i.e., it gives a well defined element
of the abelian group `E(\GF{p}) / (p+1) E(\GF{p})`.
See [St2011b]_, Proposition 5.4 for a proof of the above
well-definedness assertion.
EXAMPLES:
A Kolyvagin point on a rank 1 curve::
sage: E = EllipticCurve('37a1'); P = E.kolyvagin_point(-67)
sage: P.mod(2)
(1 : 1 : 1)
sage: P.mod(3)
(1 : 0 : 1)
sage: P.mod(5)
(2 : 2 : 1)
sage: P.mod(7)
(6 : 0 : 1)
sage: P.trace_to_real_numerical()
(1.61355529131986 : -2.18446840788880 : 1.00000000000000)
sage: P._trace_exact_conductor_1() # the actual point we're reducing
(1357/841 : -53277/24389 : 1)
sage: (P._trace_exact_conductor_1().height() / E.regulator()).sqrt()
12.0000000000000
Here the Kolyvagin point is a torsion point (since `E` has
rank 0), and we reduce it modulo several primes::
sage: E = EllipticCurve('11a1'); P = E.kolyvagin_point(-7)
sage: P.mod(3,70) # long time (4s on sage.math, 2013)
(1 : 2 : 1)
sage: P.mod(5,70)
(1 : 4 : 1)
sage: P.mod(7,70)
Traceback (most recent call last):
...
ValueError: p must be coprime to conductors and discriminant
sage: P.mod(11,70)
Traceback (most recent call last):
...
ValueError: p must be coprime to conductors and discriminant
sage: P.mod(13,70)
(3 : 4 : 1)
"""
# check preconditions
p = ZZ(p)
if not p.is_prime():
raise ValueError("p must be prime")
E = self.curve()
D = self.discriminant()
if (E.conductor() * D * self.conductor()) % p == 0:
raise ValueError("p must be coprime to conductors and discriminant")
K = self.heegner_point().quadratic_field()
if len(K.factor(p)) != 1:
raise ValueError("p must be inert")
# do actual calculation
if self.conductor() == 1:
P = self._trace_exact_conductor_1(prec=prec)
return E.change_ring(GF(p))(P)
else:
raise NotImplementedError
## def congruent_rational_point(self, n, prec=53):
## r"""
## Let `P` be this Kolyvagin point. Determine whether there is a
## point `z` in `E(\QQ)` such that `z - P \in n E(K_c)`, where `K_c`
## is the ring class field over which this Kolyvagin point is defined.
## If `z` exists return `z`. Otherwise return None.
##
## INPUT:
##
## - `n` -- positive integer
##
## - ``prec`` -- positive integer (default: 53)
##
##
## EXAMPLES::
##
## """
## raise NotImplementedError
def kolyvagin_cohomology_class(self, n=None):
"""
INPUT:
- `n` -- positive integer that divides the gcd of `a_p`
and `p+1` for all `p` dividing the conductor. If `n` is
``None``, choose the largest valid `n`.
EXAMPLES::
sage: y = EllipticCurve('389a').heegner_point(-7,5)
sage: P = y.kolyvagin_point()
sage: P.kolyvagin_cohomology_class(3)
Kolyvagin cohomology class c(5) in H^1(K,E[3])
sage: y = EllipticCurve('37a').heegner_point(-7,5).kolyvagin_point()
sage: y.kolyvagin_cohomology_class()
Kolyvagin cohomology class c(5) in H^1(K,E[2])
"""
return KolyvaginCohomologyClassEn(self, n)
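# The following is an editorial sketch (not part of the API) isolating the
# rational-reconstruction step used by ``_recognize_point_over_QQ`` above:
# approximate each real coordinate by a nearby rational with bounded
# denominator and let coercion into the curve reject wrong guesses.  The
# helper name and default denominator bound are illustrative assumptions.
def _sketch_recognize_rational_point(E, P, max_denominator=10**6):
    r"""
    Hypothetical helper: try to recognize the numerical point ``P`` as a
    rational point on the elliptic curve ``E`` over `\QQ`.
    """
    try:
        # coercion into E checks that the candidate satisfies the equation
        return E([x.real().nearby_rational(max_denominator=max_denominator)
                  for x in P])
    except TypeError:
        raise RuntimeError("insufficient precision to find exact point")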
class KolyvaginCohomologyClass(SageObject):
"""
A Kolyvagin cohomology class in `H^1(K,E[n])` or `H^1(K,E)[n]`
attached to a Heegner point.
EXAMPLES::
sage: y = EllipticCurve('37a').heegner_point(-7)
sage: c = y.kolyvagin_cohomology_class(3); c
Kolyvagin cohomology class c(1) in H^1(K,E[3])
sage: type(c)
<class 'sage.schemes.elliptic_curves.heegner.KolyvaginCohomologyClassEn'>
sage: loads(dumps(c)) == c
True
sage: y.kolyvagin_cohomology_class(5)
Kolyvagin cohomology class c(1) in H^1(K,E[5])
"""
def __init__(self, kolyvagin_point, n):
"""
EXAMPLES::
sage: y = EllipticCurve('389a').heegner_point(-7,5)
sage: y.kolyvagin_cohomology_class(3)
Kolyvagin cohomology class c(5) in H^1(K,E[3])
"""
if n is None:
c = kolyvagin_point.conductor()
E = kolyvagin_point.curve()
n = gcd([(p+1).gcd(E.ap(p)) for p in c.prime_divisors()])
if not kolyvagin_point.satisfies_kolyvagin_hypothesis(n):
raise ValueError("Kolyvagin point does not satisfy Kolyvagin hypothesis for %s"%n)
self.__kolyvagin_point = kolyvagin_point
self.__n = n
def __eq__(self, other):
"""
EXAMPLES::
sage: y = EllipticCurve('37a').heegner_point(-7)
sage: c = y.kolyvagin_cohomology_class(3)
sage: c == y.kolyvagin_cohomology_class(3)
True
sage: c == y.kolyvagin_cohomology_class(5)
False
This does not mean that c is nonzero (!) -- it just means c is not the number 0::
sage: c == 0
False
"""
return isinstance(other, KolyvaginCohomologyClass) and \
self.__kolyvagin_point == other.__kolyvagin_point and \
self.__n == other.__n
def __ne__(self, other):
"""
EXAMPLES::
sage: y = EllipticCurve('37a').heegner_point(-7)
sage: c = y.kolyvagin_cohomology_class(3)
sage: c != y.kolyvagin_cohomology_class(3)
False
sage: c != y.kolyvagin_cohomology_class(5)
True
"""
return not (self == other)
def n(self):
"""
Return the integer `n` so that this is a cohomology class in
`H^1(K,E[n])` or `H^1(K,E)[n]`.
EXAMPLES::
sage: y = EllipticCurve('37a').heegner_point(-7)
sage: t = y.kolyvagin_cohomology_class(3); t
Kolyvagin cohomology class c(1) in H^1(K,E[3])
sage: t.n()
3
"""
return self.__n
def conductor(self):
r"""
Return the integer `c` such that this cohomology class is associated
to the Heegner point `y_c`.
EXAMPLES::
sage: y = EllipticCurve('37a').heegner_point(-7,5)
sage: t = y.kolyvagin_cohomology_class()
sage: t.conductor()
5
"""
return self.__kolyvagin_point.conductor()
def kolyvagin_point(self):
"""
Return the Kolyvagin point `P_c` to which this cohomology
class is associated.
EXAMPLES::
sage: y = EllipticCurve('37a').heegner_point(-7,5)
sage: t = y.kolyvagin_cohomology_class()
sage: t.kolyvagin_point()
Kolyvagin point of discriminant -7 and conductor 5 on elliptic curve of conductor 37
"""
return self.__kolyvagin_point
def heegner_point(self):
"""
Return the Heegner point `y_c` to which this cohomology class
is associated.
EXAMPLES::
sage: y = EllipticCurve('37a').heegner_point(-7,5)
sage: t = y.kolyvagin_cohomology_class()
sage: t.heegner_point()
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 37
"""
return self.__kolyvagin_point.heegner_point()
class KolyvaginCohomologyClassEn(KolyvaginCohomologyClass):
"""
EXAMPLES:
"""
def _repr_(self):
"""
EXAMPLES::
sage: y = EllipticCurve('37a').heegner_point(-7,5)
sage: t = y.kolyvagin_cohomology_class()
sage: t._repr_()
'Kolyvagin cohomology class c(5) in H^1(K,E[2])'
"""
return "Kolyvagin cohomology class c(%s) in H^1(K,E[%s])"%(
self.conductor(), self.n())
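# Editorial sketch: the largest valid ``n`` chosen in
# ``KolyvaginCohomologyClass.__init__`` above is the gcd over the primes
# `p` dividing the conductor `c` of `\gcd(a_p, p+1)`.  A standalone
# computation of that bound (the helper name is an illustrative
# assumption):
def _sketch_largest_kolyvagin_n(E, c):
    r"""
    Hypothetical helper: the largest `n` for which a Kolyvagin class of
    conductor `c` on `E` can land in `H^1(K,E[n])`.
    """
    return gcd([(p + 1).gcd(E.ap(p)) for p in ZZ(c).prime_divisors()])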
#############################################################################
# Reduction of Heegner points using Quaternion Algebras
#
# This section contains implementations of algorithms for computing
# information about reduction modulo primes of Heegner points using
# quaternion algebras. Some of this code could later be moved to the
# quaternion algebras code, but it is too immature and not general
# enough at present for that.
#############################################################################
class HeegnerQuatAlg(SageObject):
r"""
Heegner points viewed as supersingular points on the modular curve
`X_0(N)/\mathbf{F}_{\ell}`.
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(13); H
Heegner points on X_0(11) over F_13
sage: type(H)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerQuatAlg'>
sage: loads(dumps(H)) == H
True
"""
def __init__(self, level, ell):
r"""
INPUT:
- ``level`` -- the level (a positive integer)
- `\ell` -- the characteristic, a prime coprime to the level
EXAMPLES::
sage: sage.schemes.elliptic_curves.heegner.HeegnerQuatAlg(11, 13)
Heegner points on X_0(11) over F_13
"""
level = ZZ(level); ell = ZZ(ell)
if not ell.is_prime():
raise ValueError("ell must be prime")
if level.gcd(ell) != 1:
raise ValueError("level and ell must be coprime")
self.__level = level
self.__ell = ell
def __eq__(self, other):
"""
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3)
sage: H == heegner_points(11).reduce_mod(3)
True
sage: H == heegner_points(11).reduce_mod(5)
False
sage: H == 0
False
"""
return isinstance(other, HeegnerQuatAlg) and self.__level == other.__level \
and self.__ell == other.__ell
def __ne__(self, other):
"""
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3)
sage: H != heegner_points(11).reduce_mod(3)
False
sage: H != heegner_points(11).reduce_mod(5)
True
sage: H != 0
True
"""
return not (self == other)
def _repr_(self):
"""
Return string representation.
EXAMPLES::
sage: heegner_points(11).reduce_mod(13)._repr_()
'Heegner points on X_0(11) over F_13'
"""
return "Heegner points on X_0(%s) over F_%s"%(
self.__level, self.__ell)
def level(self):
"""
Return the level.
EXAMPLES::
sage: heegner_points(11).reduce_mod(3).level()
11
"""
return self.__level
def ell(self):
r"""
Return the prime `\ell` modulo which we are working.
EXAMPLES::
sage: heegner_points(11).reduce_mod(3).ell()
3
"""
return self.__ell
def satisfies_heegner_hypothesis(self, D, c=ZZ(1)):
r"""
The fundamental discriminant `D` must be coprime to `N\ell`,
and must define a quadratic imaginary field `K` in which `\ell`
is inert. Also, all primes dividing `N` must split in `K`,
and `c` must be squarefree and coprime to `ND\ell`.
INPUT:
- `D` -- negative integer
- `c` -- positive integer (default: 1)
OUTPUT:
- bool
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(7)
sage: H.satisfies_heegner_hypothesis(-5)
False
sage: H.satisfies_heegner_hypothesis(-7)
False
sage: H.satisfies_heegner_hypothesis(-8)
True
sage: [D for D in [-1,-2..-100] if H.satisfies_heegner_hypothesis(D)]
[-8, -39, -43, -51, -79, -95]
"""
D = ZZ(D); c = ZZ(c)
if gcd(c*D, self.__level*self.__ell) != 1 or gcd(c,D) != 1:
return False
if not satisfies_weak_heegner_hypothesis(self.__level, D):
return False
if not is_inert(D, self.__ell):
return False
return True
def heegner_discriminants(self, n=5):
r"""
Return the first `n` negative fundamental discriminants
coprime to `N\ell` such that `\ell` is inert in the
corresponding quadratic imaginary field and that field
satisfies the Heegner hypothesis, where `N` is the level.
INPUT:
- `n` -- positive integer (default: 5)
OUTPUT:
- list
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3)
sage: H.heegner_discriminants()
[-7, -19, -40, -43, -52]
sage: H.heegner_discriminants(10)
[-7, -19, -40, -43, -52, -79, -127, -139, -151, -184]
"""
v = []
D = ZZ(-5)
while len(v) < n:
if self.satisfies_heegner_hypothesis(D):
v.append(D)
D -= 1
return v
def heegner_conductors(self, D, n=5):
r"""
Return the first `n` conductors `c` such that the pair `(D, c)`
satisfies the Heegner hypothesis for this reduction (see
``satisfies_heegner_hypothesis``).
INPUT:
- `D` -- negative integer; a fundamental Heegner
discriminant
- `n` -- positive integer (default: 5)
OUTPUT:
- list
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3)
sage: H.heegner_conductors(-7)
[1, 2, 4, 5, 8]
sage: H.heegner_conductors(-7, 10)
[1, 2, 4, 5, 8, 10, 13, 16, 17, 19]
"""
v = [ZZ(1)]
c = ZZ(2)
while len(v) < n:
if self.satisfies_heegner_hypothesis(D, c):
v.append(c)
c += 1
return v
def optimal_embeddings(self, D, c, R):
"""
INPUT:
- `D` -- negative fundamental discriminant
- `c` -- integer coprime
- `R` -- Eichler order
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3)
sage: R = H.left_orders()[0]
sage: H.optimal_embeddings(-7, 1, R)
[Embedding sending sqrt(-7) to i - j - k,
Embedding sending sqrt(-7) to -i + j + k]
sage: H.optimal_embeddings(-7, 2, R)
[Embedding sending 2*sqrt(-7) to 5*i - k,
Embedding sending 2*sqrt(-7) to -5*i + k,
Embedding sending 2*sqrt(-7) to 2*i - 2*j - 2*k,
Embedding sending 2*sqrt(-7) to -2*i + 2*j + 2*k]
"""
Q, G = R.ternary_quadratic_form(include_basis=True)
n = -D*c*c
reps = Q.representation_vector_list(n+1)[-1]
# The representatives give elements in terms of the
# subspace's basis such that the embedding is given by
# phi(c*sqrt(D)) = beta
E = []
for r in reps:
beta = sum(G[i]*r[i] for i in range(len(G)))
phi = HeegnerQuatAlgEmbedding(D, c, R, beta)
E.append(phi)
return E
@cached_method
def brandt_module(self):
"""
Return the Brandt module of right ideal classes that we
used to represent the set of supersingular points on
the modular curve.
EXAMPLES::
sage: heegner_points(11).reduce_mod(3).brandt_module()
Brandt module of dimension 2 of level 3*11 of weight 2 over Rational Field
"""
from sage.modular.quatalg.all import BrandtModule
return BrandtModule(self.__ell, self.__level)
@cached_method
def quaternion_algebra(self):
"""
Return the rational quaternion algebra used to implement self.
EXAMPLES::
sage: heegner_points(389).reduce_mod(7).quaternion_algebra()
Quaternion Algebra (-1, -7) with base ring Rational Field
"""
return self.brandt_module().quaternion_algebra()
def right_ideals(self):
"""
Return representative right ideals in the Brandt module.
EXAMPLES::
sage: heegner_points(11).reduce_mod(3).right_ideals()
(Fractional ideal (2 + 2*j + 28*k, 2*i + 26*k, 4*j + 12*k, 44*k),
Fractional ideal (2 + 2*j + 28*k, 2*i + 4*j + 38*k, 8*j + 24*k, 88*k))
"""
return self.brandt_module().right_ideals()
@cached_method
def left_orders(self):
"""
Return the left orders associated to the representative right
ideals in the Brandt module.
EXAMPLES::
sage: heegner_points(11).reduce_mod(3).left_orders()
[Order of Quaternion Algebra (-1, -3) with base ring Rational Field with basis (1/2 + 1/2*j + 7*k, 1/2*i + 13/2*k, j + 3*k, 11*k),
Order of Quaternion Algebra (-1, -3) with base ring Rational Field with basis (1/2 + 1/2*j + 7*k, 1/4*i + 1/2*j + 63/4*k, j + 14*k, 22*k)]
"""
return [I.left_order() for I in self.right_ideals()]
@cached_method
def heegner_divisor(self, D, c=ZZ(1)):
r"""
Return Heegner divisor as an element of the Brandt module
corresponding to the discriminant `D` and conductor `c`, which
both must be coprime to `N\ell`.
More precisely, we compute the sum of the reductions of the
`\textrm{Gal}(K_1/K)`-conjugates of each choice of `y_1`,
where the choice comes from choosing the ideal `\mathcal{N}`.
Then we apply the Hecke operator `T_c` to this sum.
INPUT:
- `D` -- discriminant (negative integer)
- `c` -- conductor (positive integer)
OUTPUT:
- Brandt module element
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(7)
sage: H.heegner_discriminants()
[-8, -39, -43, -51, -79]
sage: H.heegner_divisor(-8)
(1, 0, 0, 1, 0, 0)
sage: H.heegner_divisor(-39)
(1, 2, 2, 1, 2, 0)
sage: H.heegner_divisor(-43)
(1, 0, 0, 1, 0, 0)
sage: H.heegner_divisor(-51)
(1, 0, 0, 1, 0, 2)
sage: H.heegner_divisor(-79)
(3, 2, 2, 3, 0, 0)
sage: sum(H.heegner_divisor(-39).element())
8
sage: QuadraticField(-39,'a').class_number()
4
"""
if not self.satisfies_heegner_hypothesis(D, c):
raise ValueError("D and c must be coprime to N and ell")
B = self.brandt_module()
if c > 1:
# Just apply T_c to divisor for c=1
z = self.heegner_divisor(D)
return B.hecke_operator(c)(z)
n = -D
v = [0]*B.degree()
for i, R in enumerate(self.left_orders()):
Q = R.ternary_quadratic_form()
a = Q.theta_series(n+1)[n]
if a > 0:
reps = Q.representation_vector_list(n+1)[-1]
k = len([r for r in reps if gcd(r) == 1])
assert k%2 == 0
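# The representations r and -r give the same optimal embedding up
# to conjugation, so representations pair up and the multiplicity
# at this ideal class is k/2.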
v[i] += k // 2
return B(v)
@cached_method
def modp_splitting_data(self, p):
r"""
Return mod `p` splitting data for the quaternion algebra at the
unramified prime `p`. This is a pair of `2\times 2` matrices
`A`, `B` over the finite field `\GF{p}` such that if the
quaternion algebra has generators `i, j, k`, then the
homomorphism sending `i` to `A` and `j` to `B` maps any
maximal order homomorphically onto the ring of `2\times 2` matrices.
Because of how the homomorphism is defined, we must assume that the
prime `p` is odd.
INPUT:
- `p` -- unramified odd prime
OUTPUT:
- 2-tuple of matrices over finite field
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(7)
sage: H.quaternion_algebra()
Quaternion Algebra (-1, -7) with base ring Rational Field
sage: I, J = H.modp_splitting_data(13)
sage: I
[ 0 12]
[ 1 0]
sage: J
[7 3]
[3 6]
sage: I^2
[12 0]
[ 0 12]
sage: J^2
[6 0]
[0 6]
sage: I*J == -J*I
True
The following is a good test because of the asserts in the code::
sage: v = [H.modp_splitting_data(p) for p in primes(13,200)]
Some edge cases::
sage: H.modp_splitting_data(11)
(
[ 0 10] [6 1]
[ 1 0], [1 5]
)
Proper error handling::
sage: H.modp_splitting_data(7)
Traceback (most recent call last):
...
ValueError: p (=7) must be an unramified prime
sage: H.modp_splitting_data(2)
Traceback (most recent call last):
...
ValueError: p must be odd
"""
p = ZZ(p)
if not p.is_prime():
raise ValueError("p (=%s) must be prime"%p)
if p == 2:
raise ValueError("p must be odd")
Q = self.quaternion_algebra()
if Q.discriminant() % p == 0:
raise ValueError("p (=%s) must be an unramified prime"%p)
i, j, k = Q.gens()
F = GF(p)
i2 = F(i*i)
j2 = F(j*j)
M = MatrixSpace(F, 2)
I = M([0,i2,1,0])
i2inv = 1/i2
a = None
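# Search for nonzero b in F_p such that j2 + b^2/i2 is a square;
# taking a = -sqrt(j2 + b^2/i2) below forces J^2 = j2 and I*J = -J*I.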
#for b in reversed(list(F)):
for b in list(F):
if not b: continue
c = j2 + i2inv * b*b
if c.is_square():
a = -c.sqrt()
break
assert a is not None, "bug in that no splitting solution found"
J = M([a,b,(j2-a*a)/b, -a])
assert I*J == -J*I, "bug in that I,J do not skew commute"
return I, J
def modp_splitting_map(self, p):
r"""
Return (algebra) map from the (`p`-integral) quaternion algebra to
the set of `2\times 2` matrices over `\GF{p}`.
INPUT:
- `p` -- prime number
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(7)
sage: f = H.modp_splitting_map(13)
sage: B = H.quaternion_algebra(); B
Quaternion Algebra (-1, -7) with base ring Rational Field
sage: i,j,k = H.quaternion_algebra().gens()
sage: a = 2+i-j+3*k; b = 7+2*i-4*j+k
sage: f(a*b)
[12 3]
[10 5]
sage: f(a)*f(b)
[12 3]
[10 5]
"""
I, J = self.modp_splitting_data(p)
K = I*J
F = I.base_ring()
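# The map below sends x0 + x1*i + x2*j + x3*k to
# x0 + x1*I + x2*J + x3*K, with coefficients reduced into GF(p).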
def phi(q):
v = [F(a) for a in q.coefficient_tuple()]
return v[0] + I*v[1] + J*v[2] + K*v[3]
return phi
def cyclic_subideal_p1(self, I, c):
r"""
Compute the dictionary mapping 2-tuples that define normalized
elements of `P^1(\ZZ/c\ZZ)` to the corresponding cyclic
subideals of `I`.
INPUT:
- `I` -- right ideal of an Eichler order in a quaternion algebra
- `c` -- square free integer (currently must be an odd prime,
coprime to the level, discriminant, characteristic, etc.)
OUTPUT:
- dictionary mapping 2-tuples (u,v) to ideals
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(7)
sage: I = H.brandt_module().right_ideals()[0]
sage: sorted(H.cyclic_subideal_p1(I,3).items())
[((0, 1),
Fractional ideal (2 + 2*j + 32*k, 2*i + 8*j + 82*k, 12*j + 60*k, 132*k)),
((1, 0),
Fractional ideal (2 + 10*j + 28*k, 2*i + 4*j + 62*k, 12*j + 60*k, 132*k)),
((1, 1),
Fractional ideal (2 + 2*j + 76*k, 2*i + 4*j + 106*k, 12*j + 60*k, 132*k)),
((1, 2),
Fractional ideal (2 + 10*j + 116*k, 2*i + 8*j + 38*k, 12*j + 60*k, 132*k))]
sage: len(H.cyclic_subideal_p1(I,17))
18
"""
c = ZZ(c)
if not c.is_prime():
raise NotImplementedError("currently c must be prime")
if c == 2:
raise NotImplementedError("currently c must be odd")
phi = self.modp_splitting_map(c)
B = self.brandt_module()
P1 = P1List(c)
ans = {}
# Actually they are submodules despite the name below.
for J in B.cyclic_submodules(I, c):
B = J.basis()
V = phi(B[0]).kernel()
for i in [1,2,3]:
V = V.intersection(phi(B[i]).kernel())
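# The common kernel of the mod-c images of a basis of J is a line
# in (Z/cZ)^2; its normalization in P^1(Z/cZ) labels the cyclic
# subideal J.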
b = V.basis()
assert len(b) == 1, "common kernel must have dimension 1"
uv = P1.normalize(ZZ(b[0][0])%c, ZZ(b[0][1])%c)
ans[uv] = J
assert len(ans) == c+1
return ans
@cached_method
def galois_group_over_hilbert_class_field(self, D, c):
"""
Return the Galois group of the extension of ring class fields
`K_c` over the Hilbert class field `K_{1}` of the quadratic
imaginary field of discriminant `D`.
INPUT:
- `D` -- fundamental discriminant
- `c` -- conductor (square-free integer)
EXAMPLES::
sage: N = 37; D = -7; ell = 17; c = 41; p = 3
sage: H = heegner_points(N).reduce_mod(ell)
sage: H.galois_group_over_hilbert_class_field(D, c)
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 41 over Hilbert class field of QQ[sqrt(-7)]
"""
Kc = heegner_points(self.level(), D, c).ring_class_field()
K1 = heegner_points(self.level(), D, 1).ring_class_field()
return Kc.galois_group(K1)
@cached_method
def galois_group_over_quadratic_field(self, D, c):
"""
Return the Galois group of the extension of ring class fields
`K_c` over the quadratic imaginary field `K` of discriminant `D`.
INPUT:
- `D` -- fundamental discriminant
- `c` -- conductor (square-free integer)
EXAMPLES::
sage: N = 37; D = -7; ell = 17; c = 41; p = 3
sage: H = heegner_points(N).reduce_mod(ell)
sage: H.galois_group_over_quadratic_field(D, c)
Galois group of Ring class field extension of QQ[sqrt(-7)] of conductor 41 over Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
"""
Kc = heegner_points(self.level(), D, c).ring_class_field()
return Kc.galois_group(Kc.quadratic_field())
@cached_method
def quadratic_field(self, D):
"""
Return our fixed choice of quadratic imaginary field of
discriminant `D`.
INPUT:
- `D` -- fundamental discriminant
OUTPUT:
- a quadratic number field
EXAMPLES::
sage: H = heegner_points(389).reduce_mod(5)
sage: H.quadratic_field(-7)
Number Field in sqrt_minus_7 with defining polynomial x^2 + 7 with sqrt_minus_7 = 2.645751311064591?*I
"""
Kc = heegner_points(self.level(), D, 1).ring_class_field()
return Kc.quadratic_field()
@cached_method
def kolyvagin_cyclic_subideals(self, I, p, alpha_quaternion):
r"""
Return list of pairs `(J, n)` where `J` runs through the
cyclic subideals of `I` of index `(\ZZ/p\ZZ)^2`, and `J \sim
\alpha^n(J_0)` for some fixed choice of cyclic subideal `J_0`.
INPUT:
- `I` -- right ideal of the quaternion algebra
- `p` -- prime number
- ``alpha_quaternion`` -- image in the quaternion algebra
of generator `\alpha` for
`(\mathcal{O}_K / c\mathcal{O}_K)^* / (\ZZ/c\ZZ)^*`.
OUTPUT:
- list of 2-tuples
EXAMPLES::
sage: N = 37; D = -7; ell = 17; c=5
sage: H = heegner_points(N).reduce_mod(ell)
sage: B = H.brandt_module(); I = B.right_ideals()[32]
sage: f = H.optimal_embeddings(D, 1, I.left_order())[1]
sage: g = H.kolyvagin_generators(f.domain().number_field(), c)
sage: alpha_quaternion = f(g[0]); alpha_quaternion
1 - 5/128*i - 77/192*j + 137/384*k
sage: H.kolyvagin_cyclic_subideals(I, 5, alpha_quaternion)
[(Fractional ideal (2 + 874/3*j + 128356/3*k, 2*i + 932/3*j + 198806/3*k, 2560/3*j + 33280/3*k, 94720*k), 0), (Fractional ideal (2 + 462*j + 82892*k, 2*i + 932/3*j + 141974/3*k, 2560/3*j + 33280/3*k, 94720*k), 1), (Fractional ideal (2 + 2410/3*j + 261988/3*k, 2*i + 652*j + 89650*k, 2560/3*j + 33280/3*k, 94720*k), 2), (Fractional ideal (2 + 2410/3*j + 91492/3*k, 2*i + 1444/3*j + 148630/3*k, 2560/3*j + 33280/3*k, 94720*k), 3), (Fractional ideal (2 + 874/3*j + 71524/3*k, 2*i + 2468/3*j + 275606/3*k, 2560/3*j + 33280/3*k, 94720*k), 4), (Fractional ideal (2 + 462*j + 63948*k, 2*i + 2468/3*j + 218774/3*k, 2560/3*j + 33280/3*k, 94720*k), 5)]
"""
X = I.cyclic_right_subideals(p, alpha_quaternion)
return [(J, i) for i, J in enumerate(X)]
@cached_method
def kolyvagin_generator(self, K, p):
r"""
Return element in `K` that maps to the multiplicative generator
for the quotient group
`(\mathcal{O}_K / p \mathcal{O}_K)^* / (\ZZ/p\ZZ)^*`
of the form `\sqrt{D}+n` with `n\geq 1` minimal.
INPUT:
- `K` -- quadratic imaginary field
- `p` -- inert prime
EXAMPLES::
sage: N = 37; D = -7; ell = 17; p=5
sage: H = heegner_points(N).reduce_mod(ell)
sage: B = H.brandt_module(); I = B.right_ideals()[32]
sage: f = H.optimal_embeddings(D, 1, I.left_order())[0]
sage: H.kolyvagin_generator(f.domain().number_field(), 5)
a + 1
This function requires that p be prime, but kolyvagin_generators works in general::
sage: H.kolyvagin_generator(f.domain().number_field(), 5*17)
Traceback (most recent call last):
...
NotImplementedError: p must be prime
sage: H.kolyvagin_generators(f.domain().number_field(), 5*17)
[-34*a + 1, 35*a + 106]
"""
p = ZZ(p)
if not p.is_prime():
raise NotImplementedError("p must be prime")
if K.discriminant() % p == 0:
raise ValueError("p must be unramified")
if len(K.factor(p)) != 1:
raise ValueError("p must be inert")
F = K.residue_field(p)
a = F.gen()
assert a*a == K.discriminant(), "bug: we assumed generator of finite field must be square root of discriminant, but for some reason this is not true"
for n in range(1,p):
if (a + n).multiplicative_order() % (p*p-1) == 0:
return K.gen() + n
raise RuntimeError("there is a bug in kolyvagin_generator")
@cached_method
def kolyvagin_generators(self, K, c):
r"""
Return elements in `\mathcal{O}_K` that map to multiplicative generators
for the factors of the quotient group
`(\mathcal{O}_K / c \mathcal{O}_K)^* / (\ZZ/c\ZZ)^*`
corresponding to the prime divisors of c. Each generator is
of the form `\sqrt{D}+n` with `n\geq 1` minimal.
INPUT:
- `K` -- quadratic imaginary field
- `c` -- square free product of inert primes
EXAMPLES::
sage: N = 37; D = -7; ell = 17; p=5
sage: H = heegner_points(N).reduce_mod(ell)
sage: B = H.brandt_module(); I = B.right_ideals()[32]
sage: f = H.optimal_embeddings(D, 1, I.left_order())[0]
sage: H.kolyvagin_generators(f.domain().number_field(), 5*17)
[-34*a + 1, 35*a + 106]
"""
v = []
F = ZZ(c).factor()
from sage.rings.integer_ring import crt_basis
B = crt_basis([x[0] for x in F])
for i, (p, e) in enumerate(F):
if e > 1:
raise ValueError("c must be square free")
alpha = self.kolyvagin_generator(K, p)
# Now we use the Chinese Remainder Theorem to make an element
# of O_K that equals alpha modulo p and equals 1 modulo
# all other prime divisors of c.
Z = [1]*len(B)
Z[i] = alpha[0]
a0 = sum([Z[j]*B[j] for j in range(len(B))])
Z = [0]*len(B)
Z[i] = alpha[1]
a1 = sum([Z[j]*B[j] for j in range(len(B))])
v.append(alpha.parent()([a0,a1]))
return v
@cached_method
def kolyvagin_sigma_operator(self, D, c, r, bound=None):
"""
Return the action of the Kolyvagin sigma operator on the `r`-th
basis vector.
INPUT:
- `D` -- fundamental discriminant
- `c` -- conductor (square-free integer, need not be prime)
- `r` -- nonnegative integer
- ``bound`` -- (default: ``None``), if given, controls
precision of computation of theta series, which could
impact performance, but does not impact correctness
EXAMPLES:
We first try to verify Kolyvagin's conjecture for a rank 2
curve by working modulo 5, but we are unlucky with `c=17`::
sage: N = 389; D = -7; ell = 5; c = 17; q = 3
sage: H = heegner_points(N).reduce_mod(ell)
sage: E = EllipticCurve('389a')
sage: V = H.modp_dual_elliptic_curve_factor(E, q, 5) # long time (4s on sage.math, 2012)
sage: k118 = H.kolyvagin_sigma_operator(D, c, 118)
sage: k104 = H.kolyvagin_sigma_operator(D, c, 104)
sage: [b.dot_product(k104.element().change_ring(GF(3))) for b in V.basis()] # long time
[0, 0]
sage: [b.dot_product(k118.element().change_ring(GF(3))) for b in V.basis()] # long time
[0, 0]
Next we try again with `c=41` and this does work, in that we
get something nonzero, when dotting with V::
sage: c = 41
sage: k118 = H.kolyvagin_sigma_operator(D, c, 118)
sage: k104 = H.kolyvagin_sigma_operator(D, c, 104)
sage: [b.dot_product(k118.element().change_ring(GF(3))) for b in V.basis()] # long time
[2, 0]
sage: [b.dot_product(k104.element().change_ring(GF(3))) for b in V.basis()] # long time
[1, 0]
By the way, the above is the first ever provable verification
of Kolyvagin's conjecture for any curve of rank at least 2.
Another example, but where the curve has rank 1::
sage: N = 37; D = -7; ell = 17; c = 41; q = 3
sage: H = heegner_points(N).reduce_mod(ell)
sage: H.heegner_divisor(D,1).element().nonzero_positions()
[32, 51]
sage: k32 = H.kolyvagin_sigma_operator(D, c, 32); k32
(17, 12, 33, 33, 49, 108, 3, 0, 0, 33, 37, 49, 33, 33, 59, 54, 21, 30, 0, 0, 29, 12, 41, 38, 33, 15, 0, 0, 4, 0, 7, 0, 0, 0, 0, 34, 26, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
sage: k51 = H.kolyvagin_sigma_operator(D, c, 51); k51
(5, 13, 0, 0, 14, 0, 21, 0, 0, 0, 29, 0, 0, 45, 0, 6, 0, 40, 0, 61, 0, 0, 40, 32, 0, 9, 0, 0, 0, 0, 17, 0, 0, 0, 77, 40, 2, 10, 18, 0, 0, 61, 19, 45, 26, 80, 61, 35, 35, 19, 1, 0)
sage: V = H.modp_dual_elliptic_curve_factor(EllipticCurve('37a'), q, 5); V
Vector space of degree 52 and dimension 2 over Ring of integers modulo 3
Basis matrix:
2 x 52 dense matrix over Ring of integers modulo 3
sage: [b.dot_product(k32.element().change_ring(GF(q))) for b in V.basis()]
[2, 2]
sage: [b.dot_product(k51.element().change_ring(GF(q))) for b in V.basis()]
[1, 1]
An example with `c` a product of two primes::
sage: N = 389; D = -7; ell = 5; q = 3
sage: H = heegner_points(N).reduce_mod(ell)
sage: V = H.modp_dual_elliptic_curve_factor(EllipticCurve('389a'), q, 5)
sage: k = H.kolyvagin_sigma_operator(D, 17*41, 104) # long time
sage: k # long time
(990, 656, 219, ..., 246, 534, 1254)
sage: [b.dot_product(k.element().change_ring(GF(3))) for b in V.basis()] # long time (but only because depends on something slow)
[0, 0]
"""
B = self.brandt_module()
RI = B.right_ideals()
f = self.optimal_embeddings(D, 1, RI[r].left_order())[0]
alphas = self.kolyvagin_generators(f.domain().number_field(), c)
alpha_quaternions = [f(x) for x in alphas]
if bound is None:
bound = B.dimension() // 2 + 5
theta_dict = B._theta_dict(bound)
c = ZZ(c)
J_lists = []
F = c.factor()
I = RI[r]
for i, (p, e) in enumerate(F):
if e > 1: raise ValueError("c must be square free")
X = I.cyclic_right_subideals(p, alpha_quaternions[i])
J_lists.append(dict(enumerate(X)))
ans = [0]*B.dimension()
from sage.misc.mrange import cartesian_product_iterator
for v in cartesian_product_iterator([range(1,p+1) for p,_ in F]):
J = J_lists[0][v[0]]
for i in range(1,len(J_lists)):
J = J.intersection(J_lists[i][v[i]])
J_theta = tuple(J.theta_series_vector(bound))
d = theta_dict[J_theta]
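# The theta series is a cheap invariant of the right ideal class;
# if several classes share it, fall back on an explicit
# equivalence test below.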
j = None
if len(d) == 1:
j = d[0]
else:
for z in d:
if RI[z].is_equivalent(J, 0):
j = z
# we found the right j
break
if j is None:
raise RuntimeError("bug finding equivalent ideal")
ans[j] += prod(v)
return B(ans)
@cached_method
def modp_dual_elliptic_curve_factor(self, E, p, bound=10):
"""
Return the factor of the Brandt module space modulo `p`
corresponding to the elliptic curve `E`, cut out using
Hecke operators up to ``bound``.
INPUT:
- `E` -- elliptic curve of conductor equal to the level of self
- `p` -- prime number
- `bound` -- positive integer (default: 10)
EXAMPLES::
sage: N = 37; D = -7; ell = 17; c = 41; q = 3
sage: H = heegner_points(N).reduce_mod(ell)
sage: V = H.modp_dual_elliptic_curve_factor(EllipticCurve('37a'), q, 5); V
Vector space of degree 52 and dimension 2 over Ring of integers modulo 3
Basis matrix:
2 x 52 dense matrix over Ring of integers modulo 3
"""
if E.conductor() != self.level():
raise ValueError("conductor of E must equal level of self")
p = ZZ(p)
if not p.is_prime():
raise ValueError("p (=%s) must be prime"%p)
bad = self.__level * self.__ell
V = None
q = ZZ(2)
B = self.brandt_module()
F = GF(p)
while q <= bound and (V is None or V.dimension() > 2):
verbose("q = %s"%q)
if bad % q != 0:
T = B._compute_hecke_matrix_directly(q).change_ring(F).transpose()
if V is None:
V = (T - E.ap(q)).kernel()
else:
t = T.restrict(V)
W = (t - E.ap(q)).kernel()
V = (W.basis_matrix() * V.basis_matrix()).row_space()
q = q.next_prime()
return V
@cached_method
def rational_kolyvagin_divisor(self, D, c):
r"""
Return the Kolyvagin divisor as an element of the Brandt module
corresponding to the discriminant `D` and conductor `c`, which
both must be coprime to `N\ell`.
INPUT:
- `D` -- discriminant (negative integer)
- `c` -- conductor (positive integer)
OUTPUT:
- Brandt module element (or tuple of them)
EXAMPLES::
sage: N = 389; D = -7; ell = 5; c = 17; q = 3
sage: H = heegner_points(N).reduce_mod(ell)
sage: k = H.rational_kolyvagin_divisor(D, c); k # long time (5s on sage.math, 2013)
(2, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 4, 0, 0, 9, 11, 0, 6, 0, 0, 7, 0, 0, 0, 0, 14, 12, 13, 15, 17, 0, 0, 0, 0, 8, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
sage: V = H.modp_dual_elliptic_curve_factor(EllipticCurve('389a'), q, 2)
sage: [b.dot_product(k.element().change_ring(GF(q))) for b in V.basis()] # long time
[0, 0]
sage: k = H.rational_kolyvagin_divisor(D, 59)
sage: [b.dot_product(k.element().change_ring(GF(q))) for b in V.basis()]
[2, 0]
"""
if not self.satisfies_heegner_hypothesis(D, c):
raise ValueError("D and c must be coprime to N and ell")
hd = self.heegner_divisor(D)
v = hd.element()
if class_number(D) != 1:
raise NotImplementedError("class number greater than 1 not implemented")
i = min(v.nonzero_positions())
return self.kolyvagin_sigma_operator(D, c, i)
#w = 0
#for i, a in six.iteritems(v.dict()):
# w += a * self.kolyvagin_sigma_operator(D, c, i)
# return w
@cached_method
def kolyvagin_point_on_curve(self, D, c, E, p, bound=10):
r"""
Compute the image of the Kolyvagin divisor `P_c` in
`E(\GF{\ell^2}) / p E(\GF{\ell^2})`.
Note that this image is by definition only well defined up to
scalars. However, doing multiple computations will always
yield the same result, and working modulo different `\ell` is
compatible (since we always choose the same generator for
`\textrm{Gal}(K_c/K_1)`).
INPUT:
- `D` -- fundamental negative discriminant
- `c` -- conductor
- `E` -- elliptic curve of conductor the level of self
- `p` -- odd prime number such that we consider image in
`E(\GF{\ell^2}) / p E(\GF{\ell^2})`
- ``bound`` -- integer (default: 10)
EXAMPLES::
sage: N = 37; D = -7; ell = 17; c = 41; p = 3
sage: H = heegner_points(N).reduce_mod(ell)
sage: H.kolyvagin_point_on_curve(D, c, EllipticCurve('37a'), p)
[2, 2]
"""
k = self.rational_kolyvagin_divisor(D, c)
V = self.modp_dual_elliptic_curve_factor(E, p, bound)
return [b.dot_product(k.element().change_ring(GF(p))) for b in V.basis()]
def kolyvagin_reduction_data(E, q, first_only=True):
r"""
Given an elliptic curve of positive rank and a prime `q`, this
function returns data about how to use Kolyvagin's `q`-torsion
Heegner point Euler system to do computations with this curve.
See the precise description of the output below.
INPUT:
- `E` -- elliptic curve over `\QQ` of rank 1 or 2
- `q` -- an odd prime that does not divide the order of the
rational torsion subgroup of `E`
- ``first_only`` -- bool (default: ``True``) whether to only return
the first prime that one can work modulo to get data about
the Euler system
OUTPUT in the rank 1 case or when the default flag ``first_only=True``:
- `\ell` -- first good odd prime satisfying the Kolyvagin
condition that `q` divides `\gcd(a_{\ell},\ell+1)` and the
reduction map is surjective onto `E(\GF{\ell}) / q
E(\GF{\ell})`
- `D` -- discriminant of the first quadratic imaginary field
`K` that satisfies the Heegner hypothesis for `E` such that
both `\ell` is inert in `K`, and the twist `E^D` has analytic
rank `\leq 1`
- `h_D` -- the class number of `K`
- the dimension of the Brandt module `B(\ell,N)`, where `N` is
the conductor of `E`
OUTPUT in the rank 2 case:
- `\ell_1` -- first prime (as above in the rank 1 case) where
reduction map is surjective
- `\ell_2` -- second prime (as above) where reduction map is
surjective
- `D` -- discriminant of the first quadratic imaginary field
`K` that satisfies the Heegner hypothesis for `E` such that
both `\ell_1` and `\ell_2` are simultaneously inert in `K`,
and the twist `E^D` has analytic rank `\leq 1`
- `h_D` -- the class number of `K`
- the dimension of the Brandt module `B(\ell_1,N)`, where `N` is
the conductor of `E`
- the dimension of the Brandt module `B(\ell_2,N)`
EXAMPLES:
Import this function::
sage: from sage.schemes.elliptic_curves.heegner import kolyvagin_reduction_data
A rank 1 example::
sage: kolyvagin_reduction_data(EllipticCurve('37a1'),3)
(17, -7, 1, 52)
A rank 3 example::
sage: kolyvagin_reduction_data(EllipticCurve('5077a1'),3)
(11, -47, 5, 4234)
sage: H = heegner_points(5077, -47)
sage: [c for c in H.kolyvagin_conductors(2,10,EllipticCurve('5077a1'),3) if c%11]
[667, 943, 1189, 2461]
sage: factor(667)
23 * 29
A rank 4 example (the first Kolyvagin class that we could try to
compute would be `P_{23\cdot 29\cdot 41}`, and would require
working in a space of dimension 293060, so prohibitive at
present)::
sage: E = elliptic_curves.rank(4)[0]
sage: kolyvagin_reduction_data(E,3) # long time
(11, -71, 7, 293060)
sage: H = heegner_points(293060, -71)
sage: H.kolyvagin_conductors(1,4,E,3)
[11, 17, 23, 41]
The first rank 2 example::
sage: kolyvagin_reduction_data(EllipticCurve('389a'),3)
(5, -7, 1, 130)
sage: kolyvagin_reduction_data(EllipticCurve('389a'),3, first_only=False)
(5, 17, -7, 1, 130, 520)
A large `q = 7`::
sage: kolyvagin_reduction_data(EllipticCurve('1143c1'),7, first_only=False)
(13, 83, -59, 3, 1536, 10496)
Additive reduction::
sage: kolyvagin_reduction_data(EllipticCurve('2350g1'),5, first_only=False)
(19, 239, -311, 19, 6480, 85680)
"""
from .ell_generic import is_EllipticCurve
if not is_EllipticCurve(E):
raise TypeError("E must be an elliptic curve")
q = ZZ(q)
if not q.is_prime():
raise ValueError("q must be a prime")
if q.gcd(E.torsion_order()) != 1:
raise NotImplementedError("q must be coprime to torsion")
N = E.conductor()
r = E.rank()
if r == 0:
raise ValueError("E must have positive rank")
if E.rank() == 1:
first_only = True
from sage.modular.quatalg.all import BrandtModule
def twist_is_minimal(D):
# return True if the quadratic twist E^D has analytic rank <= 1
return E.quadratic_twist(D).analytic_rank() <= 1
def red(P, ell):
# reduce the point P on the elliptic curve modulo ell
w = list(P)
d = lcm([a.denominator() for a in w])
return E.change_ring(GF(ell))([d*a for a in w])
def best_heegner_D(ell_1, ell_2):
# return the first Heegner discriminant D satisfying all the
# hypotheses such that both ell_1 and ell_2 are inert
D = -5
while True:
if number_field.is_fundamental_discriminant(D) and \
D % ell_1 and D % ell_2 and \
E.satisfies_heegner_hypothesis(D) and \
is_inert(D, ell_1) and is_inert(D, ell_2) and \
twist_is_minimal(D):
return D
D -= 1
if first_only:
# find first prime ell with various conditions
# such that reduction is surjective to E(F_ell)/q.
ell = ZZ(3)
while True:
while N % ell == 0 or gcd(ell+1,E.ap(ell)) % q != 0:
ell = ell.next_prime()
# determine if mod ell reduction is surjective, using
# partly that it is a lemma that E(F_ell)/q is cyclic.
m = ZZ(E.Np(ell) / q)
for P in E.gens():
if red(P,ell) * m != 0:
# bingo, is surjective
D = best_heegner_D(ell,ell)
return (ell, D, class_number(D), BrandtModule(ell,N).dimension())
# end for
ell = ell.next_prime()
if E.rank() != 2:
raise ValueError("if first_only is not True, then the curve E must have rank 1 or 2")
P, Q = E.gens()
def kernel_of_reduction(ell):
# return list of reps for the kernel as a subgroup of the map
# E(Q) / q E(Q) ----> E(F_ell) / q E(F_ell)
m = ZZ(E.Np(ell) / q)
A = [a*P + b*Q for a in range(q) for b in range(q)]
return [z for z in A if red(z,ell) * m == 0]
# compute first good odd prime
ell_1 = ZZ(3)
while True:
while N % ell_1 == 0 or gcd(ell_1+1,E.ap(ell_1)) % q != 0:
ell_1 = ell_1.next_prime()
# compute kernel of reduction modulo ell_1
G1 = set(kernel_of_reduction(ell_1))
if len(G1) == q:
break
ell_1 = ell_1.next_prime()
# compute next good odd prime with distinct kernel of order q
ell_2 = ell_1.next_prime()
while True:
while N % ell_2 == 0 or gcd(ell_2+1,E.ap(ell_2)) % q != 0:
ell_2 = ell_2.next_prime()
G2 = set(kernel_of_reduction(ell_2))
if G1 != G2 and len(G2) == q:
break
ell_2 = ell_2.next_prime()
# Find smallest D where both ell_1 and ell_2 are inert
D = best_heegner_D(ell_1, ell_2)
return (ell_1, ell_2, D, class_number(D),
BrandtModule(ell_1,N).dimension(),
BrandtModule(ell_2,N).dimension())
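# Editorial sketch isolating the surjectivity test used inside
# ``kolyvagin_reduction_data`` above: since `E(\GF{\ell})/q E(\GF{\ell})`
# is cyclic (as noted in the comments above), the image of a point `P`
# generates it exactly when `m P` is nonzero modulo `\ell`, where
# `m = \#E(\GF{\ell})/q`.  The helper name is an illustrative assumption.
def _sketch_reduction_is_surjective(E, P, ell, q):
    r"""
    Hypothetical helper: decide whether the reduction of `P` generates
    `E(\GF{\ell}) / q E(\GF{\ell})`.
    """
    w = list(P)
    d = lcm([a.denominator() for a in w])   # clear denominators first
    Pbar = E.change_ring(GF(ell))([d * a for a in w])
    m = ZZ(E.Np(ell) / q)
    return m * Pbar != 0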
class HeegnerQuatAlgEmbedding(SageObject):
r"""
The homomorphism `\mathcal{O} \to R`, where `\mathcal{O}` is the
order of conductor `c` in the quadratic field of discriminant `D`,
and `R` is an Eichler order in a quaternion algebra.
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: f = H.optimal_embeddings(-7, 2, R)[1]; f
Embedding sending 2*sqrt(-7) to -5*i + k
sage: type(f)
<class 'sage.schemes.elliptic_curves.heegner.HeegnerQuatAlgEmbedding'>
sage: loads(dumps(f)) == f
True
"""
def __init__(self, D, c, R, beta):
r"""
INPUT:
- `D` -- negative fundamental discriminant
- `c` -- positive integer coprime to `D`
- `R` -- Eichler order in a rational quaternion algebra
- `\beta` -- element of `R` such that the homomorphism
sends `c\sqrt{D}` to `\beta`
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: i,j,k = H.quaternion_algebra().gens()
sage: import sage.schemes.elliptic_curves.heegner as heegner
sage: heegner.HeegnerQuatAlgEmbedding(-7, 2, R, -5*i+k)
Embedding sending 2*sqrt(-7) to -5*i + k
"""
self.__D = D
self.__c = c
self.__R = R
self.__beta = beta
def __eq__(self, other):
"""
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: f = H.optimal_embeddings(-7, 2, R)[0]
sage: f == H.optimal_embeddings(-7, 2, R)[0]
True
sage: f == H.optimal_embeddings(-7, 2, R)[1]
False
sage: f == 0
False
"""
return isinstance(other, HeegnerQuatAlgEmbedding) and \
self.__D == other.__D and \
self.__c == other.__c and \
self.__R == other.__R and \
self.__beta == other.__beta
def __ne__(self, other):
"""
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: f = H.optimal_embeddings(-7, 2, R)[0]
sage: f != H.optimal_embeddings(-7, 2, R)[0]
False
sage: f != H.optimal_embeddings(-7, 2, R)[1]
True
sage: f != 0
True
"""
return not (self == other)
def __call__(self, x):
"""
Return image of `x` under this embedding.
INPUT:
- `x` -- element of the quadratic order
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: f = H.optimal_embeddings(-7, 1, R)[1]; f
Embedding sending sqrt(-7) to -i + j + k
sage: a = f.domain_gen(); a^2
-7
sage: f(2 + 3*a)
2 - 3*i + 3*j + 3*k
sage: 2 + 3*f(a)
2 - 3*i + 3*j + 3*k
sage: f(a)^2
-7
"""
v = self.domain().number_field()(x).vector()
w = v * self.matrix()
z = self.codomain().quaternion_algebra()(w)
# There is no notion of an "element of an order" implemented
# for quaternion algebras right now. All elements are
# elements of the ambient rational quaternion algebra.
return z
@cached_method
def matrix(self):
r"""
Return the matrix over `\QQ` of this morphism, with respect to
the basis `1, c\sqrt{D}` of the domain and the basis `1,i,j,k` of
the ambient rational quaternion algebra (which contains the
domain).
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: f = H.optimal_embeddings(-7, 1, R)[1]; f
Embedding sending sqrt(-7) to -i + j + k
sage: f.matrix()
[ 1 0 0 0]
[ 0 -1 1 1]
sage: f.conjugate().matrix()
[ 1 0 0 0]
[ 0 1 -1 -1]
"""
return matrix(QQ,2,4,[[1,0,0,0], self.__beta.coefficient_tuple()])
@cached_method
def domain(self):
"""
Return the domain of this embedding.
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: H.optimal_embeddings(-7, 2, R)[0].domain()
Order in Number Field in a with defining polynomial x^2 + 7 with a = 2.645751311064591?*I
"""
R, a = quadratic_order(self.__D, self.__c)
# The following assumption is used, e.g., in the __call__
# method. I know that it is satisfied by the current
# implementation. But somebody might someday annoyingly change
# the implementation, and we want to catch that if it were to
# ever happen.
assert R.basis() == [1, a], "an assumption about construction of orders is violated"
self.__domain_gen = a
return R
def domain_gen(self):
r"""
Return the specific generator `c \sqrt{D}` for the domain
order.
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: f = H.optimal_embeddings(-7, 2, R)[0]
sage: f.domain_gen()
2*a
sage: f.domain_gen()^2
-28
"""
self.domain()
return self.__domain_gen
def domain_conductor(self):
"""
Return the conductor of the domain.
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: H.optimal_embeddings(-7, 2, R)[0].domain_conductor()
2
"""
return self.__c
def beta(self):
r"""
Return the element `\beta` in the quaternion algebra order
that `c\sqrt{D}` maps to.
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: H.optimal_embeddings(-7, 2, R)[1].beta()
-5*i + k
"""
return self.__beta
def codomain(self):
"""
Return the codomain of this embedding.
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: H.optimal_embeddings(-7, 2, R)[0].codomain()
Order of Quaternion Algebra (-1, -3) with base ring Rational Field with basis (1/2 + 1/2*j + 7*k, 1/2*i + 13/2*k, j + 3*k, 11*k)
"""
return self.__R
@cached_method
def _repr_(self):
"""
Return string representation of this embedding.
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: f = H.optimal_embeddings(-7, 2, R)[1]; f._repr_()
'Embedding sending 2*sqrt(-7) to -5*i + k'
"""
a = '%ssqrt(%s)'%('%s*'%self.__c if self.__c > 1 else '', self.__D)
return "Embedding sending %s to %s"%(a, self.__beta)
def conjugate(self):
"""
Return the conjugate of this embedding, which is also an
embedding.
EXAMPLES::
sage: H = heegner_points(11).reduce_mod(3); R = H.left_orders()[0]
sage: f = H.optimal_embeddings(-7, 2, R)[1]
sage: f.conjugate()
Embedding sending 2*sqrt(-7) to 5*i - k
sage: f
Embedding sending 2*sqrt(-7) to -5*i + k
"""
return HeegnerQuatAlgEmbedding(self.__D, self.__c,
self.__R, self.__beta.conjugate())
#############################################################################
# Utility Functions
#############################################################################
def quadratic_order(D, c, names='a'):
r"""
Return the order of conductor `c` in the quadratic field with
fundamental discriminant `D`.
INPUT:
- `D` -- fundamental discriminant
- `c` -- conductor
- ``names`` -- string (default: 'a')
OUTPUT:
- order `R` of conductor `c` in an imaginary quadratic field
- the element `c\sqrt{D}` as an element of `R`
The generator for the field is named 'a' by default.
EXAMPLES::
sage: sage.schemes.elliptic_curves.heegner.quadratic_order(-7,3)
(Order in Number Field in a with defining polynomial x^2 + 7 with a = 2.645751311064591?*I,
3*a)
sage: sage.schemes.elliptic_curves.heegner.quadratic_order(-7,3,'alpha')
(Order in Number Field in alpha with defining polynomial x^2 + 7 with alpha = 2.645751311064591?*I,
3*alpha)
"""
K = QuadraticField(D, names)
sqrtD = K.gen(0)
t = sqrtD * c
R = K.order([t])
return R, R(t)
def class_number(D):
"""
Return the class number of the quadratic field with fundamental
discriminant `D`.
INPUT:
- `D` -- integer
EXAMPLES::
sage: sage.schemes.elliptic_curves.heegner.class_number(-20)
2
sage: sage.schemes.elliptic_curves.heegner.class_number(-23)
3
sage: sage.schemes.elliptic_curves.heegner.class_number(-163)
1
A ValueError is raised when `D` is not a fundamental
discriminant::
sage: sage.schemes.elliptic_curves.heegner.class_number(-5)
Traceback (most recent call last):
...
ValueError: D (=-5) must be a fundamental discriminant
"""
if not number_field.is_fundamental_discriminant(D):
raise ValueError("D (=%s) must be a fundamental discriminant" % D)
return QuadraticField(D, 'a').class_number()
def is_inert(D, p):
r"""
Return ``True`` if `p` is an inert prime in the field `\QQ(\sqrt{D})`.
INPUT:
- `D` -- fundamental discriminant
- `p` -- prime integer
EXAMPLES::
sage: sage.schemes.elliptic_curves.heegner.is_inert(-7,3)
True
sage: sage.schemes.elliptic_curves.heegner.is_inert(-7,7)
False
sage: sage.schemes.elliptic_curves.heegner.is_inert(-7,11)
False
"""
K = QuadraticField(D,'a')
F = K.factor(p)
return len(F) == 1 and F[0][1] == 1
def is_split(D, p):
r"""
Return ``True`` if `p` is a split prime in the field `\QQ(\sqrt{D})`.
INPUT:
- `D` -- fundamental discriminant
- `p` -- prime integer
EXAMPLES::
sage: sage.schemes.elliptic_curves.heegner.is_split(-7,3)
False
sage: sage.schemes.elliptic_curves.heegner.is_split(-7,7)
False
sage: sage.schemes.elliptic_curves.heegner.is_split(-7,11)
True
"""
K = QuadraticField(D,'a')
F = K.factor(p)
return len(F) == 2
def is_ramified(D, p):
r"""
Return ``True`` if `p` is a ramified prime in the field `\QQ(\sqrt{D})`.
INPUT:
- `D` -- fundamental discriminant
- `p` -- prime integer
EXAMPLES::
sage: sage.schemes.elliptic_curves.heegner.is_ramified(-7,2)
False
sage: sage.schemes.elliptic_curves.heegner.is_ramified(-7,7)
True
sage: sage.schemes.elliptic_curves.heegner.is_ramified(-1,2)
True
"""
return QuadraticField(D,'a').discriminant() % p == 0
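# Editorial sketch: for a fundamental discriminant `D` the three
# predicates above are governed by the Kronecker symbol: `p` is ramified
# exactly when `p` divides `D`, and otherwise split or inert according to
# whether `(D/p)` equals `+1` or `-1`.  The helper name is an
# illustrative assumption.
def _sketch_splitting_type(D, p):
    r"""
    Hypothetical helper: return ``'ramified'``, ``'split'`` or
    ``'inert'`` for the prime `p` in `\QQ(\sqrt{D})`.
    """
    if ZZ(D) % p == 0:
        return 'ramified'
    return 'split' if ZZ(D).kronecker(p) == 1 else 'inert'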
def nearby_rational_poly(f, **kwds):
r"""
Return a polynomial whose coefficients are rational numbers close
to the coefficients of `f`.
INPUT:
- `f` -- polynomial with real floating point entries
- ``**kwds`` -- passed on to ``nearby_rational`` method
EXAMPLES::
sage: R.<x> = RR[]
sage: sage.schemes.elliptic_curves.heegner.nearby_rational_poly(2.1*x^2 + 3.5*x - 1.2, max_error=10e-16)
21/10*X^2 + 7/2*X - 6/5
sage: sage.schemes.elliptic_curves.heegner.nearby_rational_poly(2.1*x^2 + 3.5*x - 1.2, max_error=10e-17)
4728779608739021/2251799813685248*X^2 + 7/2*X - 5404319552844595/4503599627370496
sage: RR(4728779608739021/2251799813685248 - 21/10)
8.88178419700125e-17
"""
R = QQ['X']
return R([a.nearby_rational(**kwds) for a in f])
def simplest_rational_poly(f, prec):
"""
Return a polynomial whose coefficients are rationals that are as
simple as possible while still being close to the coefficients of `f`.
INPUT:
- `f` -- polynomial with real floating point entries
- ``prec`` -- positive integer
EXAMPLES::
sage: R.<x> = RR[]
sage: sage.schemes.elliptic_curves.heegner.simplest_rational_poly(2.1*x^2 + 3.5*x - 1.2, 53)
21/10*X^2 + 7/2*X - 6/5
"""
R = QQ['X']
Z = RealField(prec)
return R([Z(a).simplest_rational() for a in f])
def satisfies_weak_heegner_hypothesis(N, D):
r"""
Check that `D` satisfies the weak Heegner hypothesis relative to `N`.
This is all that is needed to define Heegner points.
The condition is that `D<0` is a fundamental discriminant and that
each unramified prime dividing `N` splits in `K=\QQ(\sqrt{D})` and
each ramified prime exactly divides `N`. We also do not require
that `D<-4`.
INPUT:
- `N` -- positive integer
- `D` -- negative integer
EXAMPLES::
sage: s = sage.schemes.elliptic_curves.heegner.satisfies_weak_heegner_hypothesis
sage: s(37,-7)
True
sage: s(37,-37)
False
sage: s(37,-37*4)
True
sage: s(100,-4)
False
sage: [D for D in [-1,-2,..,-40] if s(37,D)]
[-3, -4, -7, -11, -40]
sage: [D for D in [-1,-2,..,-100] if s(37,D)]
[-3, -4, -7, -11, -40, -47, -67, -71, -83, -84, -95]
sage: EllipticCurve('37a').heegner_discriminants_list(10)
[-7, -11, -40, -47, -67, -71, -83, -84, -95, -104]
"""
if not number_field.is_fundamental_discriminant(D):
return False
if D >= 0:
return False
for p, e in N.factor():
if D % p == 0:
if e > 1:
return False
elif D.kronecker(p) != 1:
return False
return True
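# Note: this is weaker than the classical Heegner hypothesis checked by the
# elliptic curve method ``satisfies_heegner_hypothesis`` further below, which
# also requires gcd(D, N) = 1, i.e. that every prime dividing N splits in K.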
def make_monic(f):
r"""
Return a monic integral polynomial `g` and an integer `d` such
that if `\alpha` is a root of `g`, then `\alpha/d` is a root of `f`.
In other words, `c f(x) = g(d x)` for some scalar `c`.
INPUT:
- f -- polynomial over the rational numbers
OUTPUT:
a monic integral polynomial and an integer
EXAMPLES::
sage: from sage.schemes.elliptic_curves.heegner import make_monic
sage: R.<x> = QQ[]
sage: make_monic(3*x^3 + 14*x^2 - 7*x + 5)
(x^3 + 14*x^2 - 21*x + 45, 3)
In this example we verify that ``make_monic`` does what we claim it does::
sage: K.<a> = NumberField(x^3 + 17*x - 3)
sage: f = (a/7+2/3).minpoly(); f
x^3 - 2*x^2 + 247/147*x - 4967/9261
sage: g, d = make_monic(f); (g, d)
(x^3 - 42*x^2 + 741*x - 4967, 21)
sage: K.<b> = NumberField(g)
sage: (b/d).minpoly()
x^3 - 2*x^2 + 247/147*x - 4967/9261
TESTS::
sage: f = x^5 + x^3/4 + 5
sage: make_monic(f)
(x^5 + x^3 + 160, 2)
Scalar factors do not matter, the result is always monic::
sage: make_monic(f * 1000000)
(x^5 + x^3 + 160, 2)
sage: make_monic(f / 1000000)
(x^5 + x^3 + 160, 2)
"""
R = f.parent()
n = f.degree()
lc = f[n]
d = ZZ.one()
for i in range(n):
expo = n - i
# We require that (d^expo * f[i] / lc) is an integer
den = (d**expo * f[i] / lc).denominator()
for p, e in factor_trial_division(den, 1000000):
# Round up e/expo
d *= p ** ((e + expo - 1) // expo)
g = R([d**(n-i) * f[i] / lc for i in range(n+1)])
return g, d
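# A quick sanity check of the identity c*f(x) = g(d*x): since
# g(x) = sum_i d^(n-i)*(f[i]/lc)*x^i, we have g(d*x) = (d^n/lc)*f(x), so
# c = d^n/lc. For f = 3*x^3 + 14*x^2 - 7*x + 5 the function returns
# (g, d) = (x^3 + 14*x^2 - 21*x + 45, 3), and indeed
# 9*f(x) = 27*x^3 + 126*x^2 - 63*x + 45 = g(3*x).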
#####################################################################
# Elliptic curve methods
# Everywhere self below is an elliptic curve over QQ.
#####################################################################
def ell_heegner_point(self, D, c=ZZ(1), f=None, check=True):
r"""
Returns the Heegner point on this curve associated to the
quadratic imaginary field `K=\QQ(\sqrt{D})`.
If the optional parameter `c` is given, returns the higher Heegner
point associated to the order of conductor `c`.
INPUT:
- `D` -- a Heegner discriminant
- `c` -- (default: 1) conductor, must be coprime to `DN`
- `f` -- binary quadratic form or 3-tuple `(A,B,C)` of coefficients
of `AX^2 + BXY + CY^2`
- ``check`` -- bool (default: ``True``)
OUTPUT:
The Heegner point `y_c`.
EXAMPLES::
sage: E = EllipticCurve('37a')
sage: E.heegner_discriminants_list(10)
[-7, -11, -40, -47, -67, -71, -83, -84, -95, -104]
sage: P = E.heegner_point(-7); P # indirect doctest
Heegner point of discriminant -7 on elliptic curve of conductor 37
sage: P.point_exact()
(0 : 0 : 1)
sage: P.curve()
Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field
sage: P = E.heegner_point(-40).point_exact(); P
(a : -a + 1 : 1)
sage: P = E.heegner_point(-47).point_exact(); P
(a : a^4 + a - 1 : 1)
sage: P[0].parent()
Number Field in a with defining polynomial x^5 - x^4 + x^3 + x^2 - 2*x + 1
Working out the details manually::
sage: P = E.heegner_point(-47).numerical_approx(prec=200)
sage: f = algdep(P[0], 5); f
x^5 - x^4 + x^3 + x^2 - 2*x + 1
sage: f.discriminant().factor()
47^2
The Heegner hypothesis is checked::
sage: E = EllipticCurve('389a'); P = E.heegner_point(-5,7);
Traceback (most recent call last):
...
ValueError: N (=389) and D (=-5) must satisfy the Heegner hypothesis
We can specify the quadratic form::
sage: P = EllipticCurve('389a').heegner_point(-7, 5, (778,925,275)); P
Heegner point of discriminant -7 and conductor 5 on elliptic curve of conductor 389
sage: P.quadratic_form()
778*x^2 + 925*x*y + 275*y^2
"""
y = HeegnerPointOnX0N(self.conductor(), D, c, f, check=check)
return y.map_to_curve(self)
def kolyvagin_point(self, D, c=ZZ(1), check=True):
r"""
Return the Kolyvagin point on this curve associated to the
quadratic imaginary field `K=\QQ(\sqrt{D})` and conductor `c`.
INPUT:
- `D` -- a Heegner discriminant
- `c` -- (default: 1) conductor, must be coprime to `DN`
- ``check`` -- bool (default: ``True``)
OUTPUT:
The Kolyvagin point `P` of conductor `c`.
EXAMPLES::
sage: E = EllipticCurve('37a1')
sage: P = E.kolyvagin_point(-67); P
Kolyvagin point of discriminant -67 on elliptic curve of conductor 37
sage: P.numerical_approx() # abs tol 1e-14
(6.00000000000000 : -15.0000000000000 : 1.00000000000000)
sage: P.index()
6
sage: g = E((0,-1,1)) # a generator
sage: E.regulator() == E.regulator_of_points([g])
True
sage: 6*g
(6 : -15 : 1)
"""
return self.heegner_point(D,c,check=check).kolyvagin_point()
def ell_heegner_discriminants(self, bound):
"""
Return the list of self's Heegner discriminants between -1 and
-bound.
INPUT:
- ``bound (int)`` - upper bound for -discriminant
OUTPUT: The list of Heegner discriminants between -1 and -bound for
the given elliptic curve.
EXAMPLES::
sage: E=EllipticCurve('11a')
sage: E.heegner_discriminants(30) # indirect doctest
[-7, -8, -19, -24]
"""
return [-D for D in range(1, bound)
if self.satisfies_heegner_hypothesis(-D)]
def ell_heegner_discriminants_list(self, n):
"""
Return the list of self's first `n` Heegner discriminants smaller
than -5.
INPUT:
- ``n (int)`` - the number of discriminants to
compute
OUTPUT: The list of the first n Heegner discriminants smaller than
-5 for the given elliptic curve.
EXAMPLES::
sage: E=EllipticCurve('11a')
sage: E.heegner_discriminants_list(4) # indirect doctest
[-7, -8, -19, -24]
"""
v = []
D = -5
while len(v) < n:
while not self.satisfies_heegner_hypothesis(D):
D -= 1
v.append(D)
D -= 1
return v
def heegner_point_height(self, D, prec=2, check_rank=True):
r"""
Use the Gross-Zagier formula to compute the Neron-Tate canonical
height over `K` of the Heegner point corresponding to `D`, as an
interval (it is computed to some precision using `L`-functions).
If the curve has rank at least 2, then the returned height is the
exact Sage integer 0.
INPUT:
- ``D (int)`` - fundamental discriminant (=/= -3, -4)
    - ``prec (int)`` - (default: 2), use `prec \cdot \sqrt{N} + 20`
terms of `L`-series in computations, where `N` is the
conductor.
- ``check_rank`` - whether to check if the rank is at least 2 by
computing the Mordell-Weil rank directly.
OUTPUT: Interval that contains the height of the Heegner point.
EXAMPLES::
sage: E = EllipticCurve('11a')
sage: E.heegner_point_height(-7)
0.22227?
Some higher rank examples::
sage: E = EllipticCurve('389a')
sage: E.heegner_point_height(-7)
0
sage: E = EllipticCurve('5077a')
sage: E.heegner_point_height(-7)
0
sage: E.heegner_point_height(-7,check_rank=False)
0.0000?
"""
if not self.satisfies_heegner_hypothesis(D):
raise ArithmeticError("Discriminant (=%s) must be a fundamental discriminant that satisfies the Heegner hypothesis."%D)
if check_rank and self.rank() >= 2:
return ZZ(0)
if D == -3 or D == -4:
raise ArithmeticError("Discriminant (=%s) must not be -3 or -4."%D)
eps = self.root_number()
L1_vanishes = self.lseries().L1_vanishes()
IR = rings.RealIntervalField(20) # TODO: why 20 bits here?
if eps == 1 and L1_vanishes:
return IR(0) # rank even hence >= 2, so Heegner point is torsion.
RR = rings.RealField()
from math import sqrt
alpha = RR(sqrt(abs(D)))/(2*self.period_lattice().complex_area())
F = self.quadratic_twist(D)
E = self
k_E = prec*sqrt(E.conductor()) + 20
k_F = prec*sqrt(F.conductor()) + 20
MIN_ERR = RR('1e-6') # we assume that regulator and
# discriminant, etc., computed to this accuracy (which is easily the case).
# this should be made more intelligent / rigorous relative
# to the rest of the system.
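    # By Gross-Zagier the height is proportional to the derivative at s = 1
    # of L(E/K, s) = L(E, s) * L(E^D, s). Exactly one of the two factors has
    # odd functional-equation sign and vanishes at s = 1, so below the
    # derivative is taken on that factor: on the twist F = E^D when eps = +1,
    # and on E itself when eps = -1.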
if eps == 1: # E has even rank
LF1, err_F = F.lseries().deriv_at1(k_F)
LE1, err_E = E.lseries().at1(k_E)
err_F = max(err_F, MIN_ERR)
err_E = max(err_E, MIN_ERR)
return IR(alpha-MIN_ERR,alpha+MIN_ERR) * IR(LE1-err_E,LE1+err_E) * IR(LF1-err_F,LF1+err_F)
else: # E has odd rank
LE1, err_E = E.lseries().deriv_at1(k_E)
LF1, err_F = F.lseries().at1(k_F)
err_F = max(err_F, MIN_ERR)
err_E = max(err_E, MIN_ERR)
return IR(alpha-MIN_ERR,alpha+MIN_ERR) * IR(LE1-err_E,LE1+err_E) * IR(LF1-err_F,LF1+err_F)
def heegner_index(self, D, min_p=2, prec=5, descent_second_limit=12, verbose_mwrank=False, check_rank=True):
r"""
Return an interval that contains the index of the Heegner
point `y_K` in the group of `K`-rational points modulo torsion
on this elliptic curve, computed using the Gross-Zagier
formula and/or a point search, or possibly half the index
if the rank is greater than one.
If the curve has rank > 1, then the returned index is infinity.
.. NOTE::
If ``min_p`` is bigger than 2 then the index can be off by
any prime less than ``min_p``. This function returns the
index divided by `2` exactly when the rank of `E(K)` is
greater than 1 and `E(\QQ)_{/tor} \oplus E^D(\QQ)_{/tor}`
has index `2` in `E(K)_{/tor}`, where the second factor
undergoes a twist.
INPUT:
- ``D (int)`` - Heegner discriminant
    - ``min_p (int)`` - (default: 2) only rule out primes
      >= min_p dividing the index.
- ``verbose_mwrank (bool)`` - (default: ``False``); print lots of
mwrank search status information when computing regulator
    - ``prec (int)`` - (default: 5), use `prec \cdot \sqrt{N} + 20`
      terms of `L`-series in computations, where `N` is the conductor.
    - ``descent_second_limit`` - (default: 12) - used in 2-descent
when computing regulator of the twist
- ``check_rank`` - whether to check if the rank is at least 2 by
computing the Mordell-Weil rank directly.
OUTPUT: an interval that contains the index, or half the index
EXAMPLES::
sage: E = EllipticCurve('11a')
sage: E.heegner_discriminants(50)
[-7, -8, -19, -24, -35, -39, -40, -43]
sage: E.heegner_index(-7)
1.00000?
::
sage: E = EllipticCurve('37b')
sage: E.heegner_discriminants(100)
[-3, -4, -7, -11, -40, -47, -67, -71, -83, -84, -95]
sage: E.heegner_index(-95) # long time (1 second)
2.00000?
This tests doing direct computation of the Mordell-Weil group.
::
sage: EllipticCurve('675b').heegner_index(-11)
3.0000?
Currently discriminants -3 and -4 are not supported::
sage: E.heegner_index(-3)
Traceback (most recent call last):
...
ArithmeticError: Discriminant (=-3) must not be -3 or -4.
The curve 681b returns the true index, which is `3`::
sage: E = EllipticCurve('681b')
sage: I = E.heegner_index(-8); I
3.0000?
In fact, whenever the returned index has a denominator of
    `2`, the true index is obtained by multiplying the returned
index by `2`. Unfortunately, this is not an if and only if
condition, i.e., sometimes the index must be multiplied by
`2` even though the denominator is not `2`.
This example demonstrates the ``descent_second_limit`` option,
which can be used to fine tune the 2-descent used to compute
the regulator of the twist::
sage: E = EllipticCurve([1,-1,0,-1228,-16267])
sage: E.heegner_index(-8)
Traceback (most recent call last):
...
RuntimeError: ...
However when we search higher, we find the points we need::
sage: E.heegner_index(-8, descent_second_limit=16, check_rank=False) # long time
2.00000?
Two higher rank examples (of ranks 2 and 3)::
sage: E = EllipticCurve('389a')
sage: E.heegner_index(-7)
+Infinity
sage: E = EllipticCurve('5077a')
sage: E.heegner_index(-7)
+Infinity
sage: E.heegner_index(-7, check_rank=False)
0.001?
sage: E.heegner_index(-7, check_rank=False).lower() == 0
True
"""
if not self.satisfies_heegner_hypothesis(D):
raise ArithmeticError("Discriminant (=%s) must be a fundamental discriminant that satisfies the Heegner hypothesis."%D)
if check_rank and self.rank() >= 2:
return rings.infinity
# First compute upper bound on height of Heegner point.
tm = verbose("computing heegner point height...")
h0 = self.heegner_point_height(D, prec=prec, check_rank=check_rank)
if h0 == 0:
return rings.infinity
# We divide by 2 to get the height **over Q** of the
# Heegner point on the twist.
ht = h0/2
verbose('Height of heegner point = %s'%ht, tm)
if self.root_number() == 1:
F = self.quadratic_twist(D)
else:
F = self
# Now rank(F) > 0
h = ht.upper()
verbose("Heegner height bound = %s"%h)
B = F.CPS_height_bound()
verbose("CPS bound = %s"%B)
c = h/(min_p**2) + B
verbose("Search would have to be up to height = %s"%c)
from .ell_rational_field import _MAX_HEIGHT
IR = rings.RealIntervalField(20) # todo: 20?
a = 1
if c > _MAX_HEIGHT or F is self:
verbose("Doing direct computation of MW group.")
reg = F.regulator(descent_second_limit=descent_second_limit, verbose=verbose_mwrank)
if F.rank(use_database=True) == 1:
z = F.gens()[0]
FK = F.base_extend(QuadraticField(D,'a'))
z = FK(z)
if z.is_divisible_by(2):
a = 2
else:
FK_even_tor_pts = [T for T in FK.torsion_subgroup().gens() if T.order()%2==0]
if len(FK_even_tor_pts) == 2:
FK_even_tor_pts.append(sum(FK_even_tor_pts))
for T in FK_even_tor_pts:
if (z + T).is_divisible_by(2):
a = 2; break
return a*self._adjust_heegner_index(ht/IR(reg))
# Do naive search to eliminate possibility that Heegner point
# is divisible by p<min_p, without finding Heegner point.
verbose("doing point search")
P = F.point_search(c)
verbose("done with point search")
P = [x for x in P if x.order() == rings.infinity]
a = 1
if len(P) == 0:
return IR(1)
elif len(P) == 1:
z = P[0]
FK = F.base_extend(QuadraticField(D,'a'))
z = FK(z)
if z.is_divisible_by(2):
a = 2
else:
FK_even_tor_pts = [T for T in FK.torsion_subgroup().gens() if T.order()%2==0]
if len(FK_even_tor_pts) == 2:
FK_even_tor_pts.append(sum(FK_even_tor_pts))
for T in FK_even_tor_pts:
if (z + T).is_divisible_by(2):
a = 2; break
verbose("saturating")
S, I, reg = F.saturation(P)
verbose("done saturating")
return a*self._adjust_heegner_index(ht/IR(reg))
def _adjust_heegner_index(self, a):
r"""
Take the square root of the interval that contains the Heegner
index.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: a = RIF(sqrt(2))-1.4142135623730951
sage: E._adjust_heegner_index(a)
1.?e-8
"""
if a.lower() < 0:
IR = rings.RealIntervalField(20) # todo: 20?
a = IR((0, a.upper()))
return a.sqrt()
def heegner_index_bound(self, D=0, prec=5, max_height=None):
r"""
Assume ``self`` has rank 0.
Return a list `v` of primes such that if an odd prime `p` divides
the index of the Heegner point in the group of rational points
modulo torsion, then `p` is in `v`.
If 0 is in the interval of the height of the Heegner point
computed to the given prec, then this function returns `v =
0`. This does not mean that the Heegner point is torsion, just
that it is very likely torsion.
If we obtain no information from a search up to ``max_height``,
e.g., if the Siksek et al. bound is bigger than ``max_height``,
then we return `v = -1`.
INPUT:
- ``D (int)`` - (default: 0) Heegner discriminant; if
      0, use the first discriminant < -4 that satisfies the Heegner
hypothesis
- ``verbose (bool)`` - (default: ``True``)
    - ``prec (int)`` - (default: 5), use `prec \cdot \sqrt{N} + 20`
terms of `L`-series in computations, where `N` is the conductor.
    - ``max_height (float)`` - should be <= 21; bound on
logarithmic naive height used in point searches. Make smaller to
make this function faster, at the expense of possibly obtaining a
worse answer. A good range is between 13 and 21.
OUTPUT:
- ``v`` - list or int (bad primes or 0 or -1)
- ``D`` - the discriminant that was used (this is
useful if `D` was automatically selected).
- ``exact`` - either False, or the exact Heegner index
(up to factors of 2)
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: E.heegner_index_bound()
([2], -7, 2)
"""
from .ell_rational_field import _MAX_HEIGHT
if max_height is None:
max_height = _MAX_HEIGHT
else:
max_height = min(float(max_height), _MAX_HEIGHT)
if self.root_number() != 1:
raise RuntimeError("The rank must be 0.")
if D == 0:
D = -5
while not self.satisfies_heegner_hypothesis(D):
D -= 1
# First compute upper bound on Height of Heegner point.
ht = self.heegner_point_height(D, prec=prec)
if 0 in ht:
return 0, D, False
F = self.quadratic_twist(D)
h = ht.upper()
verbose("Heegner height bound = %s"%h)
B = F.CPS_height_bound()
verbose("CPS bound = %s"%B)
if self.two_torsion_rank() == 0:
H = h
else:
H = 4*h
p = 3
from sage.all import next_prime
while True:
c = H/(2*p**2) + B
if c < max_height:
break
if p > 100:
break
p = next_prime(p)
verbose("Using p = %s"%p)
if c > max_height:
verbose("No information by searching only up to max_height (=%s)."%c)
return -1, D, False
verbose("Searching up to height = %s"%c)
eps = 10e-5
def _bound(P):
"""
We will use this function below in two places. It bounds the index
using a nontrivial point.
"""
assert len(P) == 1
S, I, reg = F.saturation(P)
IR = rings.RealIntervalField(20) # todo: 20?
h = IR(reg-eps,reg+eps)
ind2 = ht/(h/2)
verbose("index squared = %s"%ind2)
ind = ind2.sqrt()
verbose("index = %s"%ind)
# Compute upper bound on square root of index.
if ind.absolute_diameter() < 1:
t, i = ind.is_int()
if t: # unique integer in interval, so we've found exact index squared.
return prime_divisors(i), D, i
raise RuntimeError("Unable to compute bound for e=%s, D=%s (try increasing precision)"%(self, D))
# First try a quick search, in case we get lucky and find
# a generator.
P = F.point_search(13, rank_bound=1)
P = [x for x in P if x.order() == rings.infinity]
if len(P) > 0:
return _bound(P)
# Do search to eliminate possibility that Heegner point is
# divisible by primes up to p, without finding Heegner point.
P = F.point_search(c, rank_bound=1)
P = [x for x in P if x.order() == rings.infinity]
if len(P) == 0:
# We've eliminated the possibility of a divisor up to p.
return rings.prime_range(3, p), D, False
else:
return _bound(P)
#################################################################################
def _heegner_index_in_EK(self, D):
r"""
Return the index of the sum of `E(\QQ)/tor + E^D(\QQ)/tor` in `E(K)/tor`.
INPUT:
- `D` -- negative integer; the Heegner discriminant
OUTPUT:
a power of 2 -- the given index
EXAMPLES:
    We compute the index for a rank 2 curve and find that it is 2::
sage: E = EllipticCurve('389a')
sage: E._heegner_index_in_EK(-7)
2
    We explicitly verify in the above example that the index is
    indeed divisible by 2, by writing down a generator of
    `E(\QQ)/tor + E^D(\QQ)/tor` that is divisible by 2 in `E(K)`::
sage: F = E.quadratic_twist(-7)
sage: K = QuadraticField(-7,'a')
sage: G = E.change_ring(K)
sage: phi = F.change_ring(K).isomorphism_to(G)
sage: P = G(E(-1,1)) + G((0,-1)) + G(phi(F(14,25))); P
(-867/3872*a - 3615/3872 : -18003/170368*a - 374575/170368 : 1)
sage: P.division_points(2)
[(1/8*a + 5/8 : -5/16*a - 9/16 : 1)]
"""
# check conditions, then use cache if possible.
if not self.satisfies_heegner_hypothesis(D):
raise ValueError("D (=%s) must satisfy the Heegner hypothesis"%D)
try:
return self.__heegner_index_in_EK[D]
except AttributeError:
self.__heegner_index_in_EK = {}
except KeyError:
pass
#####################################################################
# THE ALGORITHM:
#
# For an element P of an abelian group A, let [P] denote the
# equivalence class of P in the quotient A/A_tor of A by
# its torsion subgroup. Then for P in E(Q) + E^D(QQ), we
    # have that [P] is divisible by 2 in E(K)/tor if and only if
    # there is R in E(K) such that 2*[R] = [P], and this holds if and
    # only if there is R in E(K) and t in E(K)_tor such that
# 2*R = P + t.
#
# Using complex conjugation, one sees that the quotient
# group E(K)/tor / ( E(Q)/tor + E^D(Q)/tor ) is killed by 2.
# So to compute the order of this group we run through
# representatives P for A/(2A) where A = E(Q)/tor + E^D(Q)/tor,
# and for each we see whether there is a torsion point t in E(K)
# such that P + t is divisible by 2. Also, we have
# 2 | P+t <==> 2 | P+n*t for any odd integer n,
# so we may assume t is of 2-power order.
#####################################################################
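    # For example, with rank r = 2 and basis [P1, P2], the loop below tests
    # the three classes P1, P2 and P1 + P2; each class found to be divisible
    # by 2 (possibly after adding a 2-power torsion point) contributes a
    # half-integer vector, enlarging the lattice W and hence the index
    # [W : ZZ^r] computed at the end.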
E = self # nice shortcut
F = E.quadratic_twist(D).minimal_model()
K = rings.QuadraticField(D, 'a')
# Define a map phi that we'll use to put the points of E^D(QQ)
# into E(K):
G = E.change_ring(K)
G2 = F.change_ring(K)
phi = G2.isomorphism_to(G)
# Basis for E(Q)/tor oplus E^D(QQ)/tor in E(K):
basis = [G(z) for z in E.gens()] + [G(phi(z)) for z in F.gens()]
# Make a list of the 2-power order torsion points in E(K), including 0.
T = [G(z) for z in G.torsion_subgroup().list() if z.order() == 1 or
((z.order() % 2 == 0 and len(z.order().factor()) == 1))]
r = len(basis) # rank
V = rings.QQ**r
B = []
# Iterate through reps for A/(2*A) creating vectors in (1/2)*ZZ^r
for v in rings.GF(2)**r:
if not v: continue
P = sum([basis[i] for i in range(r) if v[i]])
for t in T:
if (P+t).is_divisible_by(2):
B.append(V(v)/2)
A = rings.ZZ**r
# Take span of our vectors in (1/2)*ZZ^r, along with ZZ^r. This is E(K)/tor.
W = V.span(B, rings.ZZ) + A
# Compute the index in E(K)/tor of A = E(Q)/tor + E^D(Q)/tor, cache, and return.
index = A.index_in(W)
self.__heegner_index_in_EK[D] = index
return index
def heegner_sha_an(self, D, prec=53):
r"""
Return the conjectural (analytic) order of Sha for E over the field `K=\QQ(\sqrt{D})`.
INPUT:
- `D` -- negative integer; the Heegner discriminant
- prec -- integer (default: 53); bits of precision to
compute analytic order of Sha
OUTPUT:
(floating point number) an approximation to the conjectural order of Sha.
.. NOTE::
Often you'll want to do ``proof.elliptic_curve(False)`` when
using this function, since often the twisted elliptic
curves that come up have enormous conductor, and Sha is
nontrivial, which makes provably finding the Mordell-Weil
group using 2-descent difficult.
EXAMPLES:
An example where E has conductor 11::
sage: E = EllipticCurve('11a')
sage: E.heegner_sha_an(-7) # long time
1.00000000000000
The cache works::
sage: E.heegner_sha_an(-7) is E.heegner_sha_an(-7) # long time
True
Lower precision::
sage: E.heegner_sha_an(-7,10) # long time
1.0
Checking that the cache works for any precision::
sage: E.heegner_sha_an(-7,10) is E.heegner_sha_an(-7,10) # long time
True
Next we consider a rank 1 curve with nontrivial Sha over the
quadratic imaginary field `K`; however, there is no Sha for `E`
over `\QQ` or for the quadratic twist of `E`::
sage: E = EllipticCurve('37a')
sage: E.heegner_sha_an(-40) # long time
4.00000000000000
sage: E.quadratic_twist(-40).sha().an() # long time
1
sage: E.sha().an() # long time
1
A rank 2 curve::
sage: E = EllipticCurve('389a') # long time
sage: E.heegner_sha_an(-7) # long time
1.00000000000000
If we remove the hypothesis that `E(K)` has rank 1 in Conjecture
2.3 in [GZ1986]_ page 311, then that conjecture is
false, as the following example shows::
sage: E = EllipticCurve('65a') # long time
sage: E.heegner_sha_an(-56) # long time
1.00000000000000
sage: E.torsion_order() # long time
2
sage: E.tamagawa_product() # long time
1
sage: E.quadratic_twist(-56).rank() # long time
2
"""
# check conditions, then return from cache if possible.
if not self.satisfies_heegner_hypothesis(D):
raise ValueError("D (=%s) must satisfy the Heegner hypothesis"%D)
try:
return self.__heegner_sha_an[(D, prec)]
except AttributeError:
self.__heegner_sha_an = {}
except KeyError:
pass
# Use the BSD conjecture over the quadratic imaginary K --
# see page 311 of [GZ1986]_ for the formula.
E = self # notational convenience
F = E.quadratic_twist(D).minimal_model()
K = rings.QuadraticField(D, 'a')
# Compute each of the quantities in BSD
# - The torsion subgroup over K.
T = E.change_ring(K).torsion_order()
# - The product of the Tamagawa numbers, which because D is
# coprime to N is just the square of the product of the
# Tamagawa numbers over QQ for E. (we square below in the
# BSD formula)
cqprod = E.tamagawa_product()
# - The leading term of the L-series, as a product of two
# other L-series.
rE = E.rank()
rF = F.rank()
L_E = E.lseries().dokchitser(prec).derivative(1, rE)
L_F = F.lseries().dokchitser(prec).derivative(1, rF)
# NOTE: The binomial coefficient in the following formula
# for the leading term in terms of the other two leading
# terms comes from the product rule for the derivative.
# You can think this through or just type something like
# f = function('f',x); g = function('g',x); diff(f*g,6)
# into Sage to be convinced.
L = binomial(rE + rF, rE) * (L_E * L_F / factorial(rE+rF) )
# - ||omega||^2 -- the period. It is twice the volume of the
# period lattice. See the following paper for a derivation:
# "Verification of the Birch and Swinnerton-Dyer Conjecture
# for Specific Elliptic Curves", G. Grigorov, A. Jorza, S. Patrikis,
# C. Patrascu, W. Stein
omega = 2 * abs(E.period_lattice().basis_matrix().det())
# - The regulator.
# First we compute the regulator of the subgroup E(QQ) + E^D(QQ)
# of E(K). The factor of 2 in the regulator
# accounts for the fact that the height over K is twice the
# height over QQ, i.e., for P in E(QQ) we have h_K(P,P) =
# 2*h_Q(P,P). See, e.g., equation (6.4) on page 230 of
# [GZ1986]_.
Reg_prod = 2**(rE + rF) * E.regulator(precision=prec) * F.regulator(precision=prec)
# Next we call off to the _heegner_index_in_EK function, which
    # saturates the group E(QQ) + E^D(QQ) in E(K), giving us the index,
# which must be a power of 2, since E(QQ) is the +1 eigenspace for
# complex conjugation, and E^D(QQ) is the -1 eigenspace.
ind = self._heegner_index_in_EK(D)
# Finally, we know the regulator of E(K).
Reg = Reg_prod / ind**2
# - Square root of the absolute value of the discriminant. This is
# easy; we just make sure the D passed in is an integer, so we
# can call sqrt with the chosen precision.
sqrtD = ZZ(abs(D)).sqrt(prec=prec)
# - Done: Finally, we plug everything into the BSD formula to get the
# analytic order of Sha.
sha_an = (L * T**2 * sqrtD) / (omega * Reg * cqprod**2)
# - We cache and return the answer.
self.__heegner_sha_an[(D, prec)] = sha_an
return sha_an
def _heegner_forms_list(self, D, beta=None, expected_count=None):
r"""
Returns a list of quadratic forms corresponding to Heegner points
with discriminant `D` and a choice of `\beta` a square root of
`D` mod `4N`. Specifically, given a quadratic form
`f = Ax^2 + Bxy + Cy^2` we let `\tau_f` be a root of `Ax^2 + Bx + C`
and the discriminant `\Delta(\tau_f) = \Delta(f) = D` must be
invariant under multiplication by `N`, the conductor of ``self``.
`\Delta(N\tau_f) = \Delta(\tau_f) = \Delta(f) = D`
EXAMPLES::
sage: E = EllipticCurve('37a')
sage: E._heegner_forms_list(-7)
[37*x^2 + 17*x*y + 2*y^2]
sage: E._heegner_forms_list(-195)
[37*x^2 + 29*x*y + 7*y^2, 259*x^2 + 29*x*y + y^2, 111*x^2 + 177*x*y + 71*y^2, 2627*x^2 + 177*x*y + 3*y^2]
sage: E._heegner_forms_list(-195)[-1].discriminant()
-195
sage: len(E._heegner_forms_list(-195))
4
sage: QQ[sqrt(-195)].class_number()
4
sage: E = EllipticCurve('389a')
sage: E._heegner_forms_list(-7)
[389*x^2 + 185*x*y + 22*y^2]
sage: E._heegner_forms_list(-59)
[389*x^2 + 313*x*y + 63*y^2, 1167*x^2 + 313*x*y + 21*y^2, 3501*x^2 + 313*x*y + 7*y^2]
"""
if expected_count is None:
expected_count = number_field.QuadraticField(D, 'a').class_number()
N = self.conductor()
if beta is None:
beta = Integers(4*N)(D).sqrt(extend=False)
else:
assert beta**2 == Integers(4*N)(D)
from sage.quadratic_forms.all import BinaryQF
b = ZZ(beta) % (2*N)
all = []
seen = []
# Note: This may give a sub-optimal list of forms.
while True:
R = (b**2-D)//(4*N)
for d in R.divisors():
f = BinaryQF([d*N, b, R//d])
fr = f.reduced_form()
if fr not in seen:
seen.append(fr)
all.append(f)
if len(all) == expected_count:
return all
b += 2*N
def _heegner_best_tau(self, D, prec=None):
r"""
Given a discriminant `D`, find the Heegner point `\tau` in the
upper half plane with largest imaginary part (which is optimal
for evaluating the modular parametrization). If the optional
parameter ``prec`` is given, return `\tau` to ``prec`` bits of
precision, otherwise return it exactly as a symbolic object.
EXAMPLES::
sage: E = EllipticCurve('37a')
sage: E._heegner_best_tau(-7)
1/74*sqrt(-7) - 17/74
sage: EllipticCurve('389a')._heegner_best_tau(-11)
1/778*sqrt(-11) - 355/778
sage: EllipticCurve('389a')._heegner_best_tau(-11, prec=100)
-0.45629820051413881748071979434 + 0.0042630138693514136878083968338*I
"""
# We know that N|A, so A = N is optimal.
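    # (Indeed Im(tau) = sqrt(|D|)/(2*A), so the smallest admissible leading
    # coefficient A = N gives the largest imaginary part.)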
N = self.conductor()
b = ZZ(Integers(4*N)(D).sqrt(extend=False) % (2*N))
# TODO: make sure a different choice of b is not better?
return (-b + ZZ(D).sqrt(prec=prec)) / (2*N)
def satisfies_heegner_hypothesis(self, D):
"""
Returns ``True`` precisely when `D` is a fundamental discriminant that
satisfies the Heegner hypothesis for this elliptic curve.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: E.satisfies_heegner_hypothesis(-7)
True
sage: E.satisfies_heegner_hypothesis(-11)
False
"""
if not number_field.is_fundamental_discriminant(D):
return False
D = ZZ(D)
if D >= 0:
return False
if D.gcd(self.conductor()) != 1:
return False
for p, _ in self.conductor().factor():
if D.kronecker(p) != 1:
return False
return True
#####################################################################
# End of elliptic curve methods.
#####################################################################
|
py | b4123ae1a7b8d13790c8376e5df376e6f53d5e02 | from transitions import Machine
import time
import keyboard
import os
import random
import cv2
from copy import copy
from typing import Union
from collections import OrderedDict
from utils.misc import wait
from game_stats import GameStats
from logger import Logger
from config import Config
from screen import Screen
from template_finder import TemplateFinder
from char import IChar
from item import ItemFinder
from item.pickit import PickIt
from ui import UiManager
from ui import BeltManager
from pather import Pather, Location
from npc_manager import NpcManager
from health_manager import HealthManager
from death_manager import DeathManager
from char.sorceress import LightSorc, BlizzSorc, NovaSorc
from char.trapsin import Trapsin
from char.hammerdin import Hammerdin
from char.barbarian import Barbarian
from char.necro import Necro
from char.basic import Basic
from char.basic_ranged import Basic_Ranged
from run import Pindle, ShenkEld, Trav, Nihlathak, Arcane, Diablo
from town import TownManager, A1, A2, A3, A4, A5
# Added for dclone ip hunt
from messages import Messenger
from utils.dclone_ip import get_d2r_game_ip
class Bot:
def __init__(self, screen: Screen, game_stats: GameStats, template_finder: TemplateFinder, pick_corpse: bool = False):
self._screen = screen
self._game_stats = game_stats
self._messenger = Messenger()
self._config = Config()
self._template_finder = template_finder
self._item_finder = ItemFinder()
self._ui_manager = UiManager(self._screen, self._template_finder, self._game_stats)
self._belt_manager = BeltManager(self._screen, self._template_finder)
self._pather = Pather(self._screen, self._template_finder)
self._pickit = PickIt(self._screen, self._item_finder, self._ui_manager, self._belt_manager)
# Create Character
if self._config.char["type"] in ["sorceress", "light_sorc"]:
self._char: IChar = LightSorc(self._config.light_sorc, self._screen, self._template_finder, self._ui_manager, self._pather)
elif self._config.char["type"] == "blizz_sorc":
self._char: IChar = BlizzSorc(self._config.blizz_sorc, self._screen, self._template_finder, self._ui_manager, self._pather)
elif self._config.char["type"] == "nova_sorc":
self._char: IChar = NovaSorc(self._config.nova_sorc, self._screen, self._template_finder, self._ui_manager, self._pather)
elif self._config.char["type"] == "hammerdin":
self._char: IChar = Hammerdin(self._config.hammerdin, self._screen, self._template_finder, self._ui_manager, self._pather)
elif self._config.char["type"] == "trapsin":
self._char: IChar = Trapsin(self._config.trapsin, self._screen, self._template_finder, self._ui_manager, self._pather)
elif self._config.char["type"] == "barbarian":
self._char: IChar = Barbarian(self._config.barbarian, self._screen, self._template_finder, self._ui_manager, self._pather)
elif self._config.char["type"] == "necro":
self._char: IChar = Necro(self._config.necro, self._screen, self._template_finder, self._ui_manager, self._pather)
elif self._config.char["type"] == "basic":
self._char: IChar = Basic(self._config.basic, self._screen, self._template_finder, self._ui_manager, self._pather)
elif self._config.char["type"] == "basic_ranged":
self._char: IChar = Basic_Ranged(self._config.basic_ranged, self._screen, self._template_finder, self._ui_manager, self._pather)
else:
Logger.error(f'{self._config.char["type"]} is not supported! Closing down bot.')
os._exit(1)
# Create Town Manager
npc_manager = NpcManager(screen, self._template_finder)
a5 = A5(self._screen, self._template_finder, self._pather, self._char, npc_manager)
a4 = A4(self._screen, self._template_finder, self._pather, self._char, npc_manager)
a3 = A3(self._screen, self._template_finder, self._pather, self._char, npc_manager)
a2 = A2(self._screen, self._template_finder, self._pather, self._char, npc_manager)
a1 = A1(self._screen, self._template_finder, self._pather, self._char, npc_manager)
self._town_manager = TownManager(self._template_finder, self._ui_manager, self._item_finder, a1, a2, a3, a4, a5)
self._route_config = self._config.routes
self._route_order = self._config.routes_order
# Create runs
if self._route_config["run_shenk"] and not self._route_config["run_eldritch"]:
Logger.error("Running shenk without eldtritch is not supported. Either run none or both")
os._exit(1)
self._do_runs = {
"run_trav": self._route_config["run_trav"],
"run_pindle": self._route_config["run_pindle"],
"run_shenk": self._route_config["run_shenk"] or self._route_config["run_eldritch"],
"run_nihlathak": self._route_config["run_nihlathak"],
"run_arcane": self._route_config["run_arcane"],
"run_diablo": self._route_config["run_diablo"],
}
# Adapt order to the config
self._do_runs = OrderedDict((k, self._do_runs[k]) for k in self._route_order if k in self._do_runs and self._do_runs[k])
self._do_runs_reset = copy(self._do_runs)
Logger.info(f"Doing runs: {self._do_runs_reset.keys()}")
if self._config.general["randomize_runs"]:
self.shuffle_runs()
self._pindle = Pindle(self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)
self._shenk = ShenkEld(self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)
self._trav = Trav(self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)
self._nihlathak = Nihlathak(self._screen, self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)
self._arcane = Arcane(self._screen, self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)
self._diablo = Diablo(self._screen, self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)
# Create member variables
self._pick_corpse = pick_corpse
self._picked_up_items = False
self._curr_loc: Union[bool, Location] = None
self._tps_left = 10 # assume half full tp book
self._pre_buffed = False
self._stopping = False
self._pausing = False
self._current_threads = []
self._no_stash_counter = 0
self._ran_no_pickup = False
# Create State Machine
self._states=['hero_selection', 'town', 'pindle', 'shenk', 'trav', 'nihlathak', 'arcane', 'diablo']
self._transitions = [
{ 'trigger': 'create_game', 'source': 'hero_selection', 'dest': 'town', 'before': "on_create_game"},
# Tasks within town
{ 'trigger': 'maintenance', 'source': 'town', 'dest': 'town', 'before': "on_maintenance"},
# Different runs
{ 'trigger': 'run_pindle', 'source': 'town', 'dest': 'pindle', 'before': "on_run_pindle"},
{ 'trigger': 'run_shenk', 'source': 'town', 'dest': 'shenk', 'before': "on_run_shenk"},
{ 'trigger': 'run_trav', 'source': 'town', 'dest': 'trav', 'before': "on_run_trav"},
{ 'trigger': 'run_nihlathak', 'source': 'town', 'dest': 'nihlathak', 'before': "on_run_nihlathak"},
{ 'trigger': 'run_arcane', 'source': 'town', 'dest': 'arcane', 'before': "on_run_arcane"},
            { 'trigger': 'run_diablo', 'source': 'town', 'dest': 'diablo', 'before': "on_run_diablo"},
# End run / game
{ 'trigger': 'end_run', 'source': ['shenk', 'pindle', 'nihlathak', 'trav', 'arcane', 'diablo'], 'dest': 'town', 'before': "on_end_run"},
{ 'trigger': 'end_game', 'source': ['town', 'shenk', 'pindle', 'nihlathak', 'trav', 'arcane', 'diablo','end_run'], 'dest': 'hero_selection', 'before': "on_end_game"},
]
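        # Sketch of the intended lifecycle: create_game -> maintenance ->
        # run_* -> end_run -> maintenance -> ... -> end_game -> create_game.
        # queued=True makes the transitions library process nested trigger
        # calls sequentially instead of recursively.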
self.machine = Machine(model=self, states=self._states, initial="hero_selection", transitions=self._transitions, queued=True)
def draw_graph(self):
# Draw the whole graph, graphviz binaries must be installed and added to path for this!
from transitions.extensions import GraphMachine
self.machine = GraphMachine(model=self, states=self._states, initial="hero_selection", transitions=self._transitions, queued=True)
self.machine.get_graph().draw('my_state_diagram.png', prog='dot')
def get_belt_manager(self) -> BeltManager:
return self._belt_manager
def get_curr_location(self):
return self._curr_loc
def start(self):
self.trigger('create_game')
def stop(self):
self._stopping = True
def toggle_pause(self):
self._pausing = not self._pausing
if self._pausing:
Logger.info(f"Pause at next state change...")
else:
Logger.info(f"Resume")
self._game_stats.resume_timer()
def trigger_or_stop(self, name: str, **kwargs):
if self._pausing:
Logger.info(f"{self._config.general['name']} is now pausing")
self._game_stats.pause_timer()
while self._pausing:
time.sleep(0.2)
if not self._stopping:
self.trigger(name, **kwargs)
def current_game_length(self):
return self._game_stats.get_current_game_length()
def shuffle_runs(self):
tmp = list(self._do_runs.items())
random.shuffle(tmp)
self._do_runs = OrderedDict(tmp)
def is_last_run(self):
found_unfinished_run = False
for key in self._do_runs:
if self._do_runs[key]:
found_unfinished_run = True
break
return not found_unfinished_run
def on_create_game(self):
keyboard.release(self._config.char["stand_still"])
# Start a game from hero selection
self._game_stats.log_start_game()
self._template_finder.search_and_wait(["MAIN_MENU_TOP_LEFT","MAIN_MENU_TOP_LEFT_DARK"], roi=self._config.ui_roi["main_menu_top_left"])
if not self._ui_manager.start_game(): return
self._curr_loc = self._town_manager.wait_for_town_spawn()
# Check for the current game ip and pause if we are able to obtain the hot ip
if self._config.dclone["region_ips"] != "" and self._config.dclone["dclone_hotip"] != "":
cur_game_ip = get_d2r_game_ip()
hot_ip = self._config.dclone["dclone_hotip"]
Logger.debug(f"Current Game IP: {cur_game_ip} and HOTIP: {hot_ip}")
if hot_ip == cur_game_ip:
self._messenger.send_message(f"Dclone IP Found on IP: {cur_game_ip}")
print("Press Enter")
input()
os._exit(1)
else:
Logger.info(f"Please Enter the region ip and hot ip on config to use")
# Run /nopickup command to avoid picking up stuff on accident
if not self._ran_no_pickup:
self._ran_no_pickup = True
if self._ui_manager.enable_no_pickup():
Logger.info("Activated /nopickup")
else:
Logger.error("Failed to detect if /nopickup command was applied or not")
self.trigger_or_stop("maintenance")
def on_maintenance(self):
# Handle picking up corpse in case of death
if self._pick_corpse:
self._pick_corpse = False
time.sleep(1.6)
DeathManager.pick_up_corpse(self._screen)
wait(1.2, 1.5)
self._belt_manager.fill_up_belt_from_inventory(self._config.char["num_loot_columns"])
wait(0.5)
# Look at belt to figure out how many pots need to be picked up
self._belt_manager.update_pot_needs()
# Check if should need some healing
img = self._screen.grab()
buy_pots = self._belt_manager.should_buy_pots()
if HealthManager.get_health(img) < 0.6 or HealthManager.get_mana(img) < 0.2 or buy_pots:
if buy_pots:
Logger.info("Buy pots at next possible Vendor")
pot_needs = self._belt_manager.get_pot_needs()
self._curr_loc = self._town_manager.buy_pots(self._curr_loc, pot_needs["health"], pot_needs["mana"])
wait(0.5, 0.8)
self._belt_manager.update_pot_needs()
                # TODO: Remove this; it is currently a workaround because not all paths from MALAH have been added yet
if self._curr_loc == Location.A5_MALAH:
if self._pather.traverse_nodes((Location.A5_MALAH, Location.A5_TOWN_START), self._char, force_move=True):
self._curr_loc = Location.A5_TOWN_START
else:
self._curr_loc = False
else:
Logger.info("Healing at next possible Vendor")
self._curr_loc = self._town_manager.heal(self._curr_loc)
if not self._curr_loc:
return self.trigger_or_stop("end_game", failed=True)
# Check if we should force stash (e.g. when picking up items by accident or after failed runs or chicken/death)
force_stash = False
self._no_stash_counter += 1
if not self._picked_up_items and (self._no_stash_counter > 4 or self._pick_corpse):
self._no_stash_counter = 0
force_stash = self._ui_manager.should_stash(self._config.char["num_loot_columns"])
# Stash stuff, either when item was picked up or after X runs without stashing because of unwanted loot in inventory
if self._picked_up_items or force_stash:
if self._config.char["id_items"]:
Logger.info("Identifying items")
self._curr_loc = self._town_manager.identify(self._curr_loc)
if not self._curr_loc:
return self.trigger_or_stop("end_game", failed=True)
Logger.info("Stashing items")
self._curr_loc = self._town_manager.stash(self._curr_loc)
if not self._curr_loc:
return self.trigger_or_stop("end_game", failed=True)
self._no_stash_counter = 0
self._picked_up_items = False
wait(1.0)
# Check if we are out of tps or need repairing
need_repair = self._ui_manager.repair_needed()
if self._tps_left < random.randint(3, 5) or need_repair or self._config.char["always_repair"]:
if need_repair: Logger.info("Repair needed. Gear is about to break")
else: Logger.info("Repairing and buying TPs at next Vendor")
self._curr_loc = self._town_manager.repair_and_fill_tps(self._curr_loc)
if not self._curr_loc:
return self.trigger_or_stop("end_game", failed=True)
self._tps_left = 20
wait(1.0)
# Check if merc needs to be revived
merc_alive = self._template_finder.search(["MERC_A2","MERC_A1","MERC_A5","MERC_A3"], self._screen.grab(), threshold=0.9, roi=self._config.ui_roi["merc_icon"]).valid
if not merc_alive and self._config.char["use_merc"]:
Logger.info("Resurrect merc")
self._game_stats.log_merc_death()
self._curr_loc = self._town_manager.resurrect(self._curr_loc)
if not self._curr_loc:
return self.trigger_or_stop("end_game", failed=True)
# Start a new run
started_run = False
for key in self._do_runs:
if self._do_runs[key]:
self.trigger_or_stop(key)
started_run = True
break
if not started_run:
self.trigger_or_stop("end_game")
def on_end_game(self, failed: bool = False):
if self._config.general["info_screenshots"] and failed:
cv2.imwrite("./info_screenshots/info_failed_game_" + time.strftime("%Y%m%d_%H%M%S") + ".png", self._screen.grab())
self._curr_loc = False
self._pre_buffed = False
self._ui_manager.save_and_exit()
self._game_stats.log_end_game(failed=failed)
self._do_runs = copy(self._do_runs_reset)
if self._config.general["randomize_runs"]:
self.shuffle_runs()
wait(0.2, 0.5)
self.trigger_or_stop("create_game")
def on_end_run(self):
if not self._config.char["pre_buff_every_run"]:
self._pre_buffed = True
success = self._char.tp_town()
if success:
self._tps_left -= 1
self._curr_loc = self._town_manager.wait_for_tp(self._curr_loc)
if self._curr_loc:
return self.trigger_or_stop("maintenance")
if not self._ui_manager.has_tps():
self._tps_left = 0
self.trigger_or_stop("end_game", failed=True)
# All the runs go here
# ==================================
def _ending_run_helper(self, res: Union[bool, tuple[Location, bool]]):
# either fill member variables with result data or mark run as failed
failed_run = True
if res:
failed_run = False
self._curr_loc, self._picked_up_items = res
# in case its the last run or the run was failed, end game, otherwise move to next run
if self.is_last_run() or failed_run:
if failed_run:
self._no_stash_counter = 10 # this will force a check if we should stash on next game
self.trigger_or_stop("end_game", failed=failed_run)
else:
self.trigger_or_stop("end_run")
def on_run_pindle(self):
res = False
self._do_runs["run_pindle"] = False
self._game_stats.update_location("Pin" if self._config.general['discord_status_condensed'] else "Pindle")
self._curr_loc = self._pindle.approach(self._curr_loc)
if self._curr_loc:
res = self._pindle.battle(not self._pre_buffed)
self._ending_run_helper(res)
def on_run_shenk(self):
res = False
self._do_runs["run_shenk"] = False
self._curr_loc = self._shenk.approach(self._curr_loc)
if self._curr_loc:
res = self._shenk.battle(self._route_config["run_shenk"], not self._pre_buffed, self._game_stats)
self._ending_run_helper(res)
def on_run_trav(self):
res = False
self._do_runs["run_trav"] = False
self._game_stats.update_location("Trav" if self._config.general['discord_status_condensed'] else "Travincal")
self._curr_loc = self._trav.approach(self._curr_loc)
if self._curr_loc:
res = self._trav.battle(not self._pre_buffed)
self._ending_run_helper(res)
def on_run_nihlathak(self):
res = False
self._do_runs["run_nihlathak"] = False
self._game_stats.update_location("Nihl" if self._config.general['discord_status_condensed'] else "Nihlathak")
self._curr_loc = self._nihlathak.approach(self._curr_loc)
if self._curr_loc:
res = self._nihlathak.battle(not self._pre_buffed)
self._ending_run_helper(res)
def on_run_arcane(self):
res = False
self._do_runs["run_arcane"] = False
self._game_stats.update_location("Arc" if self._config.general['discord_status_condensed'] else "Arcane")
self._curr_loc = self._arcane.approach(self._curr_loc)
if self._curr_loc:
res = self._arcane.battle(not self._pre_buffed)
self._tps_left -= self._arcane.used_tps
self._ending_run_helper(res)
def on_run_diablo(self):
res = False
self._do_runs["run_diablo"] = False
self._game_stats.update_location("Dia" if self._config.general['discord_status_condensed'] else "Diablo")
self._curr_loc = self._diablo.approach(self._curr_loc)
if self._curr_loc:
res = self._diablo.battle(not self._pre_buffed)
self._tps_left -= 1 # we use one tp at pentagram for calibration
self._ending_run_helper(res)
|
py | b4123b5686c406ba123b4f27395c2cf666d25f6b | from karel.stanfordkarel import *
def main():
pass
if __name__ == "__main__":
run_karel_program("1x1.w")
|
py | b4123bbabb1afb1023eee56787e74e6aaa7c2e1a |
import os
from functools import partial
import webbrowser
from PySide2 import QtCore, QtWidgets
from maya import cmds
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from ncachefactory.nodetable import DynamicNodesTableWidget
from ncachefactory.comparator import ComparisonWidget
from ncachefactory.ncache import DYNAMIC_NODES
from ncachefactory.cacheoptions import CacheOptions
from ncachefactory.qtutils import get_icon
from ncachefactory.playblastoptions import PlayblastOptions
from ncachefactory.cachemanager import (
filter_connected_cacheversions, create_and_record_cacheversion,
record_in_existing_cacheversion, append_to_cacheversion)
from ncachefactory.infos import WorkspaceCacheversionsExplorer
from ncachefactory.versioning import (
ensure_workspace_folder_exists, list_available_cacheversions,
filter_cacheversions_containing_nodes, cacheversion_contains_node)
from ncachefactory.optionvars import (
CACHEOPTIONS_EXP_OPTIONVAR, COMPARISON_EXP_OPTIONVAR,
VERSION_EXP_OPTIONVAR, PLAYBLAST_EXP_OPTIONVAR, FFMPEG_PATH_OPTIONVAR,
MAYAPY_PATH_OPTIONVAR, MEDIAPLAYER_PATH_OPTIONVAR,
MULTICACHE_EXP_OPTIONVAR, ensure_optionvars_exists)
from ncachefactory.batchcacher import BatchCacher
from ncachefactory.attributes import filter_invisible_nodes_for_manager
from ncachefactory.batch import send_batch_ncache_jobs, send_wedging_ncaches_jobs
from ncachefactory.timecallbacks import (
register_time_callback, add_to_time_callback, unregister_time_callback,
time_verbose, clear_time_callback_functions)
from ncachefactory.monitoring import MultiCacheMonitor
from ncachefactory.workspace import (
get_default_workspace, set_last_used_workspace)
from ncachefactory.workspacesetter import WorkspaceWidget
HELPFOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'help')
WINDOW_TITLE = "nCache Factory"
class NCacheManager(MayaQWidgetDockableMixin, QtWidgets.QWidget):
def __init__(self, parent=None):
super(NCacheManager, self).__init__(parent=parent)
self.setWindowTitle(WINDOW_TITLE)
self.workspace = None
self.processes = []
self.pathoptions = PathOptions(self)
self.workspace_widget = WorkspaceWidget()
self.nodetable = DynamicNodesTableWidget()
self.batch_monitor = MultiCacheMonitor(parent=self)
self.senders = CacheSendersWidget()
method = partial(self.create_cache, selection=False)
self.senders.cache_all_inc.released.connect(method)
method = partial(self.create_cache, selection=True)
self.senders.cache_selection_inc.released.connect(method)
method = partial(self.erase_cache, selection=False)
self.senders.cache_all.released.connect(method)
method = partial(self.erase_cache, selection=True)
self.senders.cache_selection.released.connect(method)
method = partial(self.append_cache, selection=True)
self.senders.append_cache.released.connect(method)
method = partial(self.append_cache, selection=False)
self.senders.append_cache_all.released.connect(method)
self.cacheoptions = CacheOptions()
self.cacheoptions_expander = Expander("Options", self.cacheoptions)
self.cacheoptions_expander.released.connect(self.save_optionvars)
self.batchcacher = BatchCacher()
method = partial(self.send_multi_cache, selection=False)
self.batchcacher.sendMultiCacheRequested.connect(method)
method = partial(self.send_multi_cache, selection=True)
self.batchcacher.sendMultiCacheSelectionRequested.connect(method)
method = partial(self.send_wedging_cache, selection=False)
self.batchcacher.sendWedgingCacheRequested.connect(method)
method = partial(self.send_wedging_cache, selection=True)
self.batchcacher.sendWedgingCacheSelectionRequested.connect(method)
self.batchcacher_expander = Expander('Batch Cacher', self.batchcacher)
self.batchcacher_expander.released.connect(self.save_optionvars)
self.playblast = PlayblastOptions()
self.playblast_expander = Expander("Playblast", self.playblast)
self.playblast_expander.released.connect(self.save_optionvars)
self.comparison = ComparisonWidget()
self.comparison.setFixedHeight(250)
self.comparison_expander = Expander("Comparisons", self.comparison)
self.comparison_expander.released.connect(self.save_optionvars)
self.versions = WorkspaceCacheversionsExplorer()
self.versions.infosModified.connect(self.nodetable.update_layout)
self.versions.cacheApplied.connect(self.nodetable.update_layout)
text = "Available Versions"
self.versions_expander = Expander(text, self.versions)
self.versions_expander.released.connect(self.save_optionvars)
self.workspace_widget.workspaceSet.connect(self.set_workspace)
self.nodetable.selectionIsChanged.connect(self.selection_changed)
self.main_widget = QtWidgets.QWidget()
self.layout = QtWidgets.QVBoxLayout(self.main_widget)
self.layout.setSpacing(0)
self.layout.addWidget(self.workspace_widget)
self.layout.setSpacing(4)
self.layout.addWidget(self.nodetable)
self.layout.addWidget(self.senders)
self.layout.addSpacing(8)
self.layout.addWidget(self.versions_expander)
self.layout.addWidget(self.versions)
self.layout.addSpacing(0)
self.layout.addWidget(self.batchcacher_expander)
self.layout.addWidget(self.batchcacher)
self.layout.addSpacing(0)
self.layout.addWidget(self.cacheoptions_expander)
self.layout.addWidget(self.cacheoptions)
self.layout.addSpacing(0)
self.layout.addWidget(self.comparison_expander)
self.layout.addWidget(self.comparison)
self.layout.addSpacing(0)
self.layout.addWidget(self.playblast_expander)
self.layout.addWidget(self.playblast)
self.layout.addStretch(1)
self.menubar = QtWidgets.QMenuBar(self)
self.menufile = QtWidgets.QMenu('Misc', self.menubar)
self.menubar.addMenu(self.menufile)
self.editpath = QtWidgets.QAction('Dependencies path', self.menufile)
self.editpath.triggered.connect(self.pathoptions.show)
self.menufile.addAction(self.editpath)
self.show_monitor = QtWidgets.QAction('Batch cache monitor', self.menufile)
self.menufile.addAction(self.show_monitor)
self.show_monitor.triggered.connect(self.batch_monitor.show)
self.help = QtWidgets.QAction('Help', self.menufile)
self.menufile.addAction(self.help)
self.help.triggered.connect(self._call_help)
self.layout.setMenuBar(self.menubar)
self.scrollarea = QtWidgets.QScrollArea()
self.scrollarea.setFocusPolicy(QtCore.Qt.NoFocus)
self.scrollarea.setWidget(self.main_widget)
self.scrollarea.setWidgetResizable(True)
self.scrollarea.setMinimumWidth(420)
self.scrollarea.setAlignment(QtCore.Qt.AlignCenter)
self.scrollarea.setFrameShape(QtWidgets.QFrame.NoFrame)
self.main_layout = QtWidgets.QVBoxLayout(self)
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_layout.addWidget(self.scrollarea)
self.set_workspace(get_default_workspace())
def _call_help(self):
path = os.path.join(HELPFOLDER, 'index.html')
webbrowser.open(url=path)
def show(self, **kwargs):
super(NCacheManager, self).show(**kwargs)
self.apply_optionvars()
self.nodetable.show()
def closeEvent(self, event):
super(NCacheManager, self).closeEvent(event)
self.nodetable.closeEvent(event)
self.comparison.closeEvent(event)
self.workspace_widget.closeEvent(event)
self.save_optionvars()
def set_workspace(self, workspace):
self.workspace = workspace
self.nodetable.set_workspace(workspace)
self.batchcacher.set_workspace(workspace)
self.workspace_widget.set_workspace(workspace)
self.nodetable.update_layout()
def selection_changed(self):
nodes = self.nodetable.selected_nodes
if not self.nodetable.selected_nodes:
self.versions.set_nodes_and_cacheversions(None, None)
self.comparison.set_node_and_cacheversion(None, None)
return
workspace = self.workspace_widget.workspace
all_cacheversions = list_available_cacheversions(workspace)
available_cacheversions = filter_cacheversions_containing_nodes(
filter_invisible_nodes_for_manager(cmds.ls(type=DYNAMIC_NODES)),
all_cacheversions)
connected_cacheversions = filter_connected_cacheversions(
nodes, available_cacheversions)
if not connected_cacheversions:
self.comparison.set_node_and_cacheversion(None, None)
self.versions.set_nodes_and_cacheversions(nodes, available_cacheversions)
return
self.versions.set_nodes_and_cacheversions(nodes, available_cacheversions)
if not cacheversion_contains_node(nodes[0], connected_cacheversions[0]):
self.comparison.set_node_and_cacheversion(None, None)
return
self.comparison.set_node_and_cacheversion(nodes[0], connected_cacheversions[0])
def apply_optionvars(self):
ensure_optionvars_exists()
state = cmds.optionVar(query=MULTICACHE_EXP_OPTIONVAR)
self.batchcacher_expander.set_state(state)
state = cmds.optionVar(query=CACHEOPTIONS_EXP_OPTIONVAR)
self.cacheoptions_expander.set_state(state)
state = cmds.optionVar(query=PLAYBLAST_EXP_OPTIONVAR)
self.playblast_expander.set_state(state)
state = cmds.optionVar(query=COMPARISON_EXP_OPTIONVAR)
self.comparison_expander.set_state(state)
state = cmds.optionVar(query=VERSION_EXP_OPTIONVAR)
self.versions_expander.set_state(state)
def save_optionvars(self):
value = self.batchcacher_expander.state
cmds.optionVar(intValue=[MULTICACHE_EXP_OPTIONVAR, int(value)])
value = self.cacheoptions_expander.state
cmds.optionVar(intValue=[CACHEOPTIONS_EXP_OPTIONVAR, int(value)])
value = self.playblast_expander.state
cmds.optionVar(intValue=[PLAYBLAST_EXP_OPTIONVAR, int(value)])
value = self.comparison_expander.state
cmds.optionVar(intValue=[COMPARISON_EXP_OPTIONVAR, int(value)])
value = self.versions_expander.state
cmds.optionVar(intValue=[VERSION_EXP_OPTIONVAR, int(value)])
def sizeHint(self):
return QtCore.QSize(350, 650)
def create_cache(self, selection=True):
register_time_callback()
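        # The time callback registered above enables per-frame instrumentation:
        # functions added to it below (e.g. time_verbose) run as the scene time
        # advances during recording, and are cleared again at the end.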
if self.cacheoptions.verbose is True:
add_to_time_callback(time_verbose)
start_frame, end_frame = self.cacheoptions.range
workspace = self.workspace_widget.directory
if workspace is None:
return cmds.warning("no workspace set")
set_last_used_workspace(workspace)
self.workspace_widget.populate()
if selection is True:
nodes = self.nodetable.selected_nodes or []
else:
nodes = cmds.ls(type=DYNAMIC_NODES)
nodes = filter_invisible_nodes_for_manager(nodes)
create_and_record_cacheversion(
workspace=workspace,
start_frame=start_frame,
end_frame=end_frame,
nodes=nodes,
behavior=self.cacheoptions.behavior,
evaluate_every_frame=self.cacheoptions.samples_evaluated,
save_every_evaluation=self.cacheoptions.samples_recorded,
playblast=self.playblast.record_playblast,
playblast_viewport_options=self.playblast.viewport_options)
self.nodetable.set_workspace(workspace)
self.nodetable.update_layout()
self.selection_changed()
unregister_time_callback()
clear_time_callback_functions()
def erase_cache(self, selection=True):
register_time_callback()
if self.cacheoptions.verbose is True:
add_to_time_callback(time_verbose)
start_frame, end_frame = self.cacheoptions.range
workspace = self.workspace_widget.directory
if workspace is None:
return cmds.warning("no workspace set")
set_last_used_workspace(workspace)
self.workspace_widget.populate()
if selection is True:
nodes = self.nodetable.selected_nodes or []
else:
nodes = cmds.ls(type=DYNAMIC_NODES)
nodes = filter_invisible_nodes_for_manager(nodes)
cacheversions = filter_connected_cacheversions(
nodes, list_available_cacheversions(workspace))
if not cacheversions or len(cacheversions) > 1:
cmds.warning(
                'no valid cacheversion, or more than one cacheversion, is '
'connected to the selected dynamic nodes')
self.create_cache(selection=selection)
return
record_in_existing_cacheversion(
cacheversion=cacheversions[0],
start_frame=start_frame,
end_frame=end_frame,
nodes=nodes,
behavior=self.cacheoptions.behavior,
evaluate_every_frame=self.cacheoptions.samples_evaluated,
save_every_evaluation=self.cacheoptions.samples_recorded,
playblast=self.playblast.record_playblast,
playblast_viewport_options=self.playblast.viewport_options)
self.nodetable.update_layout()
self.selection_changed()
unregister_time_callback()
clear_time_callback_functions()
def append_cache(self, selection=True):
register_time_callback()
if self.cacheoptions.verbose is True:
add_to_time_callback(time_verbose)
workspace = self.workspace_widget.directory
if workspace is None:
return cmds.warning("no workspace set")
set_last_used_workspace(workspace)
self.workspace_widget.populate()
if selection is True:
nodes = self.nodetable.selected_nodes or []
else:
nodes = cmds.ls(type=DYNAMIC_NODES)
nodes = filter_invisible_nodes_for_manager(nodes)
if not nodes:
return cmds.warning("no nodes selected")
cacheversion = None
for node in nodes:
cacheversions = filter_connected_cacheversions(
[node], list_available_cacheversions(workspace))
if not cacheversions:
message = "some nodes doesn't have cache connected to append"
return cmds.warning(message)
if cacheversion is None:
cacheversion = cacheversions[0]
if cacheversions[0] != cacheversion:
message = "append cache on multiple version is not suppported."
return cmds.warning(message)
append_to_cacheversion(
nodes=nodes,
cacheversion=cacheversion,
evaluate_every_frame=self.cacheoptions.samples_evaluated,
save_every_evaluation=self.cacheoptions.samples_recorded,
playblast=self.playblast.record_playblast,
playblast_viewport_options=self.playblast.viewport_options)
self.nodetable.update_layout()
self.selection_changed()
unregister_time_callback()
clear_time_callback_functions()
def send_multi_cache(self, selection=False):
if self.workspace is None:
return cmds.warning("invalid workspace set")
set_last_used_workspace(self.workspace)
mayapy = cmds.optionVar(query=MAYAPY_PATH_OPTIONVAR)
if os.path.exists(mayapy) is False:
return cmds.warning("invalid mayapy path set")
if selection is True:
nodes = self.nodetable.selected_nodes or []
else:
nodes = cmds.ls(type=DYNAMIC_NODES)
nodes = filter_invisible_nodes_for_manager(nodes)
if not nodes:
return cmds.warning("no nodes selected")
start_frame, end_frame = self.cacheoptions.range
cacheversions, processes = send_batch_ncache_jobs(
workspace=self.workspace,
jobs=self.batchcacher.jobs,
start_frame=start_frame,
end_frame=end_frame,
nodes=nodes,
evaluate_every_frame=self.cacheoptions.samples_evaluated,
save_every_evaluation=self.cacheoptions.samples_recorded,
playblast_viewport_options=self.playblast.viewport_options,
timelimit=self.batchcacher.options.timelimit,
stretchmax=self.batchcacher.options.explosion_detection_tolerance)
self.processes.extend(processes)
for cacheversion, process in zip(cacheversions, processes):
self.batch_monitor.add_job(cacheversion, process)
self.batch_monitor.show()
self.batchcacher.clear()
self.nodetable.set_workspace(self.workspace)
self.nodetable.update_layout()
self.selection_changed()
def send_wedging_cache(self, selection=False):
if self.workspace is None:
return cmds.warning("invalid workspace set")
set_last_used_workspace(self.workspace)
mayapy = cmds.optionVar(query=MAYAPY_PATH_OPTIONVAR)
if os.path.exists(mayapy) is False:
return cmds.warning("invalid mayapy path set")
if selection is True:
nodes = self.nodetable.selected_nodes or []
else:
nodes = cmds.ls(type=DYNAMIC_NODES)
nodes = filter_invisible_nodes_for_manager(nodes)
if not nodes:
return cmds.warning("no nodes selected")
start_frame, end_frame = self.cacheoptions.range
cacheversions, processes = send_wedging_ncaches_jobs(
workspace=self.workspace,
name=self.batchcacher.wedging_name,
start_frame=start_frame,
end_frame=end_frame,
nodes=nodes,
evaluate_every_frame=self.cacheoptions.samples_evaluated,
save_every_evaluation=self.cacheoptions.samples_recorded,
playblast_viewport_options=self.playblast.viewport_options,
timelimit=self.batchcacher.options.timelimit,
stretchmax=self.batchcacher.options.explosion_detection_tolerance,
attribute=self.batchcacher.attribute,
values=self.batchcacher.wedging_values)
self.processes.extend(processes)
for cacheversion, process in zip(cacheversions, processes):
self.batch_monitor.add_job(cacheversion, process)
self.batch_monitor.show()
self.nodetable.set_workspace(self.workspace)
self.nodetable.update_layout()
self.selection_changed()
class Expander(QtWidgets.QPushButton):
def __init__(self, text, child, parent=None):
super(Expander, self).__init__(parent)
self.setStyleSheet('text-align: left; font: bold')
self.setFixedHeight(20)
self.icons = get_icon('arrow_close.png'), get_icon('arrow_open.png')
self.setText(text)
self.child = child
self.state = True
self.setIcon(self.icons[int(self.state)])
self.clicked.connect(self._call_clicked)
def _call_clicked(self):
self.state = not self.state
self.child.setVisible(self.state)
self.setIcon(self.icons[int(self.state)])
def set_state(self, state):
self.state = state
self.child.setVisible(self.state)
self.setIcon(self.icons[int(self.state)])
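# Usage sketch (illustrative, with a hypothetical child widget): clicking the
# expander toggles the child's visibility and swaps the arrow icon.
#
# child = QtWidgets.QWidget()
# expander = Expander("Cache options", child)
# layout.addWidget(expander)
# layout.addWidget(child)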
class CacheSendersWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(CacheSendersWidget, self).__init__(parent)
text = "Cache selection"
self.cache_selection = QtWidgets.QPushButton(text)
self.cache_selection_inc = QtWidgets.QPushButton("+")
self.cache_selection_inc.setFixedWidth(12)
text = "Cache all"
self.cache_all = QtWidgets.QPushButton(text)
self.cache_all_inc = QtWidgets.QPushButton("+")
self.cache_all_inc.setFixedWidth(12)
self.append_cache = QtWidgets.QPushButton("Append selection")
self.append_cache_all = QtWidgets.QPushButton("all")
self.append_cache_all.setFixedWidth(40)
self.layout = QtWidgets.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.layout.addWidget(self.cache_selection)
self.layout.addSpacing(1)
self.layout.addWidget(self.cache_selection_inc)
self.layout.addSpacing(4)
self.layout.addWidget(self.cache_all)
self.layout.addSpacing(1)
self.layout.addWidget(self.cache_all_inc)
self.layout.addSpacing(4)
self.layout.addWidget(self.append_cache)
self.layout.addSpacing(1)
self.layout.addWidget(self.append_cache_all)
class PathOptions(QtWidgets.QWidget):
def __init__(self, parent=None):
super(PathOptions, self).__init__(parent, QtCore.Qt.Tool)
self.setWindowTitle("Set external applications paths")
self.ffmpeg = BrowserLine()
self.ffmpeg.text.textEdited.connect(self.save_options)
function = partial(self.get_executable_path, self.ffmpeg)
self.ffmpeg.button.released.connect(function)
self.mayapy = BrowserLine()
self.mayapy.text.textEdited.connect(self.save_options)
function = partial(self.get_executable_path, self.mayapy)
self.mayapy.button.released.connect(function)
self.mediaplayer = BrowserLine()
self.mediaplayer.text.textEdited.connect(self.save_options)
function = partial(self.get_executable_path, self.mediaplayer)
self.mediaplayer.button.released.connect(function)
self.ok = QtWidgets.QPushButton("ok")
self.ok.setFixedWidth(85)
self.ok.released.connect(self.hide)
self.ok_layout = QtWidgets.QHBoxLayout()
self.ok_layout.addStretch(1)
self.ok_layout.addWidget(self.ok)
self.form_layout = QtWidgets.QFormLayout()
self.form_layout.addRow("FFMPEG:", self.ffmpeg)
self.form_layout.addRow("mayapy", self.mayapy)
self.form_layout.addRow("media player", self.mediaplayer)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addLayout(self.form_layout)
self.layout.addLayout(self.ok_layout)
self.set_ui_states()
def get_executable_path(self, browseline):
executables = QtWidgets.QFileDialog.getOpenFileName()
# getOpenFileName returns a (path, filter) tuple; an empty path means
# the dialog was cancelled.
if not executables[0]:
return
browseline.text.setText(executables[0])
self.save_options()
def set_ui_states(self):
ensure_optionvars_exists()
text = cmds.optionVar(query=FFMPEG_PATH_OPTIONVAR)
self.ffmpeg.text.setText(text)
text = cmds.optionVar(query=MAYAPY_PATH_OPTIONVAR)
self.mayapy.text.setText(text)
text = cmds.optionVar(query=MEDIAPLAYER_PATH_OPTIONVAR)
self.mediaplayer.text.setText(text)
def save_options(self, *useless_signal_args):
cmds.optionVar(stringValue=[FFMPEG_PATH_OPTIONVAR, self.ffmpeg.text.text()])
cmds.optionVar(stringValue=[MAYAPY_PATH_OPTIONVAR, self.mayapy.text.text()])
text = self.mediaplayer.text.text()
cmds.optionVar(stringValue=[MEDIAPLAYER_PATH_OPTIONVAR, text])
class BrowserLine(QtWidgets.QWidget):
def __init__(self):
super(BrowserLine, self).__init__()
self.text = QtWidgets.QLineEdit()
self.button = QtWidgets.QPushButton(get_icon("folder.png"), "")
self.button.setFixedSize(22, 22)
self.layout = QtWidgets.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.layout.addWidget(self.text)
self.layout.addWidget(self.button)
|
py | b4123c2c77704a9ab5250e8b26f2415a12e716f4 | """
Color functions
"""
import os
import numpy as np
from asaplib.data import ASAPXYZ
def set_color_function(fcolor='none', asapxyz=None, colorscol=0, n_samples=0,
peratom=False, project_atomic=False, use_atomic_species=None, color_from_zero=False, extensive=False):
""" obtain the essential informations to define the colors of data points
Parameters
----------
fcolor: str
the name of the file or the tag in the xyz to define the colors
asapxyz: ASAPXYZ object, (optional)
colorscol: int, (optional).
if the color file has more than one column, which column to use
n_samples: int, (optional).
The number of data points
peratom: bool
return atomic color
project_atomic: bool
the samples are atomic descriptors
use_atomic_species: int
the atomic number of the selected species
color_from_zero: bool
set the min color to zero
extensive: bool
normalize the quantity by the number of atoms
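Example
-------
A minimal sketch (the file name and sample count are hypothetical)::

plotcolor, _, colorlabel, colorscale = set_color_function(
fcolor='colors.dat', n_samples=100)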
"""
plotcolor = []
plotcolor_atomic = []
colorscale = [None, None]
# if there is a file named "fcolor", we load it for the color scheme
if os.path.isfile(fcolor):
# load the column=colorscol for color functions
try:
loadcolor = np.genfromtxt(fcolor, dtype=float)
except Exception:
raise IOError('Error in loading the fcolor file for the color scheme')
# print(np.shape(loadcolor))
if colorscol > 0 or len(np.shape(loadcolor)) > 1:
plotcolor = loadcolor[:, colorscol]
else:
plotcolor = loadcolor
print('load file: ' + fcolor + ' for color schemes')
if peratom or project_atomic:
if asapxyz is None:
raise IOError('Need the xyz so that we know the number of atoms in each frame')
elif asapxyz.get_num_frames() == len(plotcolor):
for index, natomnow in enumerate(asapxyz.get_natom_list_by_species(use_atomic_species)):
plotcolor_atomic = np.append(plotcolor_atomic, plotcolor[index] * np.ones(natomnow))
elif asapxyz.get_total_natoms() == len(plotcolor):
plotcolor_atomic = plotcolor
else:
raise ValueError('Length of the xyz trajectory is not the same as number of colors in the fcolor file')
elif n_samples > 0 and fcolor in (None, 'none', 'Index', 'index') and not peratom:
# we use the index as the color scheme
plotcolor = np.arange(n_samples)
fcolor = 'sample index'
elif asapxyz is None:
raise IOError('Cannot find the xyz or fcolor files for the color scheme')
else:
if fcolor in (None, 'none', 'Index', 'index'):
# we use the index as the color scheme
plotcolor = np.arange(asapxyz.get_num_frames())
fcolor = 'sample index'
if peratom or project_atomic:
for index, natomnow in enumerate(asapxyz.get_natom_list_by_species(use_atomic_species)):
plotcolor_atomic = np.append(plotcolor_atomic, plotcolor[index] * np.ones(natomnow))
else:
try:
plotcolor = asapxyz.get_property(fcolor, extensive)
except:
raise ValueError('Cannot find the specified property from the xyz file for the color scheme')
if peratom or project_atomic:
try:
plotcolor_atomic = asapxyz.get_atomic_property(fcolor, extensive, [], use_atomic_species)
#print(np.shape(plotcolor_atomic))
except:
raise ValueError('Cannot find the specified atomic property from the xyz file for the color scheme')
if color_from_zero:
# set the min to zero
plotcolor -= np.ones(len(plotcolor))*np.nanmin(plotcolor)
if len(plotcolor_atomic) > 0:
plotcolor_atomic -= np.ones(len(plotcolor_atomic))*np.nanmin(plotcolor_atomic)
colorlabel = str(fcolor)
if peratom and not project_atomic:
# print(np.shape(plotcolor_atomic))
colorscale = [np.nanmin(plotcolor_atomic), np.nanmax(plotcolor_atomic)]
return plotcolor, np.asarray(plotcolor_atomic), colorlabel, colorscale
elif project_atomic:
colorscale = [None, None]
return np.asarray(plotcolor_atomic), [], colorlabel, colorscale
else:
colorscale = [None, None]
return plotcolor, [], colorlabel, colorscale
class COLOR_PALETTE:
def __init__(self, style=1):
if style == 1:
self.pal = ["#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059", "#0000A6", "#63FFAC", "#B79762",
"#004D43", "#8FB0FF", "#997D87", "#5A0007", "#809693",
"#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80", "#61615A", "#BA0900", "#6B7900",
"#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100", "#DDEFFF",
"#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F", "#372101",
"#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99",
"#001E09", "#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1",
"#788D66", "#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459",
"#456648", "#0086ED", "#886F4C", "#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375",
"#A3C8C9", "#FF913F", "#938A81", "#575329", "#00FECF", "#B05B6F",
"#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00", "#7900D7", "#A77500", "#6367A9",
"#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700", "#549E79",
"#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329", "#5B4534",
"#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C", "#7A4900"]
elif style == 2:
self.pal = ["#30a2da", "#fc4f30", "#e5ae38", "#6d904f", "#8b8b8b", "#006FA6", "#A30059", "#af8dc3",
"#922329", "#1E6E00"]
self.n_color = len(self.pal)
def __getitem__(self, arg): # color cycler
assert arg > -1, "???"
return self.pal[arg % self.n_color]
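# Usage sketch (illustrative, not part of the original module): the palette
# cycles, so an index past the palette length wraps around.
#
# pal = COLOR_PALETTE(style=2)
# colors = [pal[i] for i in range(15)] # wraps after the 10th entry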
|
py | b41241b9141d57f1e597aeb51daccb7eec5b56bf | """
Django settings for liveweather project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'liveweather.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'liveweather.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
py | b41243186f72ff6f6aa8073c9ff802d7d3ea6f7c | from django.test import TestCase
import networkx as nx
from cinema.tests.fixture_imbd_tsv import FixtureIMDbTsv
from cinema.cinegraph.grapher import PersonNode, WorkNode
from cinema.cinegraph import imdb_tsv
class TestIMDbTsv(TestCase, FixtureIMDbTsv):
def setUp(self):
FixtureIMDbTsv.setUp(self)
def tearDown(self):
pass
def test_person_node(self):
fred = imdb_tsv.IMDbPersonNode("nm0000001")
self.assertEqual(1, fred.id)
self.assertTrue(fred.is_person)
def test_work_node(self):
three_little_words = imdb_tsv.IMDbWorkNode("tt0043044")
self.assertEqual(43044, three_little_words.id)
self.assertFalse(three_little_words.is_person)
def test_add_person(self):
g = nx.Graph()
imdb_tsv.add_person(g, self.names[0])
expected = {
"name": "Fred Astaire",
"birth": "1899",
"death": "1987",
"professions": {"miscellaneous", "actor", "soundtrack"},
"known": {
WorkNode(53137),
WorkNode(50419),
WorkNode(72308),
WorkNode(43044),
},
}
actual = g.nodes[PersonNode(1)]
self.assertEqual(expected, actual)
def test_add_work(self):
g = nx.Graph()
imdb_tsv.add_work(g, self.basics[0])
expected = {
"title": "Three Little Words",
"adult": 0,
"start": "1950",
"end": "\\N",
"runtime": "102",
"genres": {"Biography", "Musical", "Comedy"},
}
actual = g.nodes[WorkNode(43044)]
self.assertEqual(expected, actual)
def test_update_rating(self):
g = nx.Graph()
imdb_tsv.add_work(g, self.basics[0])
imdb_tsv.update_rating(g, self.ratings[0])
expected = {
"title": "Three Little Words",
"adult": 0,
"start": "1950",
"end": "\\N",
"runtime": "102",
"genres": {"Musical", "Comedy", "Biography"},
"rating": 6.9,
"votes": 1497,
}
actual = g.nodes[WorkNode(43044)]
self.assertEqual(expected, actual)
def test_add_contribution(self):
g = self.make_graph()
expected = {"ordering": 1, "contributions": {"actor": {"job": "\\N"}}}
actual = g.edges[(PersonNode(1), WorkNode(43044))]
self.assertEqual(expected, actual)
def test_has_profession(self):
g = self.make_graph()
self.assertFalse(
imdb_tsv.has_profession_in_graph(g, PersonNode(8888888888), "actor")
)
self.assertFalse(imdb_tsv.has_profession_in_graph(g, PersonNode(1), "producer"))
self.assertTrue(imdb_tsv.has_profession_in_graph(g, PersonNode(3), "producer"))
def test_is_actor(self):
g = self.make_graph()
self.assertTrue(imdb_tsv.is_actor_in_graph(g, PersonNode(1)))
self.assertFalse(imdb_tsv.is_actor_in_graph(g, PersonNode(2)))
self.assertFalse(imdb_tsv.is_actor_in_graph(g, PersonNode(3)))
def test_is_actress(self):
g = self.make_graph()
self.assertFalse(imdb_tsv.is_actress_in_graph(g, PersonNode(1)))
self.assertTrue(imdb_tsv.is_actress_in_graph(g, PersonNode(2)))
self.assertTrue(imdb_tsv.is_actress_in_graph(g, PersonNode(3)))
def test_contributed_as(self):
g = self.make_graph()
thorpe = PersonNode(861703)
three_little_words = WorkNode(43044)
the_mirror_has_two_faces = WorkNode(117057)
self.assertIn(three_little_words, g.nodes)
self.assertIn((thorpe, three_little_words), g.edges)
self.assertTrue(
imdb_tsv.contributed_as_in_graph(g, thorpe, three_little_words, "director")
)
self.assertFalse(
imdb_tsv.contributed_as_in_graph(g, thorpe, three_little_words, "cook")
)
self.assertFalse(
imdb_tsv.contributed_as_in_graph(
g, thorpe, the_mirror_has_two_faces, "director"
)
)
def test_acted_in(self):
g = self.make_graph()
thorpe = PersonNode(861703)
astaire = PersonNode(1)
bacall = PersonNode(2)
three_little_words = WorkNode(43044)
the_mirror_has_two_faces = WorkNode(117057)
self.assertFalse(imdb_tsv.acted_in_in_graph(g, thorpe, three_little_words))
self.assertFalse(
imdb_tsv.acted_in_in_graph(g, thorpe, the_mirror_has_two_faces)
)
self.assertTrue(imdb_tsv.acted_in_in_graph(g, astaire, three_little_words))
self.assertFalse(
imdb_tsv.acted_in_in_graph(g, astaire, the_mirror_has_two_faces)
)
self.assertFalse(imdb_tsv.acted_in_in_graph(g, bacall, three_little_words))
self.assertTrue(imdb_tsv.acted_in_in_graph(g, bacall, the_mirror_has_two_faces))
def test_votes(self):
g = self.make_graph()
three_little_words = g.nodes[WorkNode(43044)]
the_mirror_has_two_faces = g.nodes[WorkNode(117057)]
self.assertEqual(1497, imdb_tsv.get_votes(three_little_words))
self.assertEqual(14369, imdb_tsv.get_votes(the_mirror_has_two_faces))
def test_rating(self):
g = self.make_graph()
three_little_words = g.nodes[WorkNode(43044)]
the_mirror_has_two_faces = g.nodes[WorkNode(117057)]
self.assertEqual(6.9, imdb_tsv.get_rating(three_little_words))
self.assertEqual(6.6, imdb_tsv.get_rating(the_mirror_has_two_faces))
def test_order(self):
g = self.make_graph()
e = g.edges[(PersonNode(1), WorkNode(43044))]
self.assertEqual(1, imdb_tsv.get_order(e))
e = g.edges[(PersonNode(1), WorkNode(50419))]
self.assertEqual(2, imdb_tsv.get_order(e))
e = g.edges[(PersonNode(2), WorkNode(117057))]
self.assertEqual(3, imdb_tsv.get_order(e))
e = g.edges[(PersonNode(861703), WorkNode(43044))]
self.assertEqual(5, imdb_tsv.get_order(e))
|
py | b412431ef4f59ea14a2d1e642e6f6b2fd362d171 | from dataclasses import dataclass, field
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-unsignedShort-pattern-2-NS"
@dataclass
class NistschemaSvIvAtomicUnsignedShortPattern2:
class Meta:
name = "NISTSchema-SV-IV-atomic-unsignedShort-pattern-2"
namespace = "NISTSchema-SV-IV-atomic-unsignedShort-pattern-2-NS"
value: str = field(
default="",
metadata={
"required": True,
"pattern": r"\d{2}",
}
)
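# Usage sketch (hypothetical value; the field metadata's pattern r"\d{2}"
# expects exactly two digits):
#
# doc = NistschemaSvIvAtomicUnsignedShortPattern2(value="42")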
|
py | b41243a0ab19c6a0c98fb109c5a4853a045afb2a | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, List, Optional
from googleapiclient.discovery import Resource, build
from googleapiclient.http import MediaFileUpload
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class GoogleAnalyticsHook(GoogleBaseHook):
"""
Hook for Google Analytics 360.
"""
def __init__(
self,
api_version: str = "v3",
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.api_version = api_version
self.gcp_connection_id = gcp_conn_id
self._conn = None
def _paginate(self, resource: Resource, list_args: Optional[Dict[str, Any]] = None):
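"""Accumulates every item from a paginated list() resource.

The loop below relies on the paging protocol used here: list() accepts
a 1-based start_index, and each response reports totalResults, so
fetching stops once all items have been collected.
"""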
list_args = list_args or {}
result: List[Dict] = []
while True:
# start index has value 1
request = resource.list(start_index=len(result) + 1, **list_args) # pylint: disable=no-member
response = request.execute(num_retries=self.num_retries)
result.extend(response.get("items", []))
# `result` accumulates the items fetched from Analytics;
# once all of them have been collected, the loop breaks
if response["totalResults"] <= len(result):
break
return result
def get_conn(self) -> Resource:
"""
Retrieves connection to Google Analytics 360.
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build(
"analytics",
self.api_version,
http=http_authorized,
cache_discovery=False,
)
return self._conn
def list_accounts(self) -> List[Dict[str, Any]]:
"""
Lists the accounts from Google Analytics 360.
"""
self.log.info("Retrieving accounts list...")
conn = self.get_conn()
accounts = conn.management().accounts() # pylint: disable=no-member
result = self._paginate(accounts)
return result
def get_ad_words_link(
self, account_id: str, web_property_id: str, web_property_ad_words_link_id: str
) -> Dict[str, Any]:
"""
Returns a web property-Google Ads link to which the user has access.
:param account_id: ID of the account which the given web property belongs to.
:type account_id: string
:param web_property_id: Web property-Google Ads link UA-string.
:type web_property_id: string
:param web_property_ad_words_link_id: the web property-Google Ads link ID to retrieve the Google Ads link for.
:type web_property_ad_words_link_id: string
:returns: web property-Google Ads
:rtype: Dict
"""
self.log.info("Retrieving ad words links...")
ad_words_link = (
self.get_conn() # pylint: disable=no-member
.management()
.webPropertyAdWordsLinks()
.get(
accountId=account_id,
webPropertyId=web_property_id,
webPropertyAdWordsLinkId=web_property_ad_words_link_id,
)
.execute(num_retries=self.num_retries)
)
return ad_words_link
def list_ad_words_links(
self, account_id: str, web_property_id: str
) -> List[Dict[str, Any]]:
"""
Lists webProperty-Google Ads links for a given web property.
:param account_id: ID of the account which the given web property belongs to.
:type account_id: str
:param web_property_id: Web property UA-string to retrieve the Google Ads links for.
:type web_property_id: str
:returns: list of entity Google Ads links.
:rtype: list
"""
self.log.info("Retrieving ad words list...")
conn = self.get_conn()
ads_links = (
conn.management().webPropertyAdWordsLinks() # pylint: disable=no-member
)
list_args = {"accountId": account_id, "webPropertyId": web_property_id}
result = self._paginate(ads_links, list_args)
return result
def upload_data(
self,
file_location: str,
account_id: str,
web_property_id: str,
custom_data_source_id: str,
resumable_upload: bool = False,
) -> None:
"""
Uploads a file to GA via the Data Import API
:param file_location: The path and name of the file to upload.
:type file_location: str
:param account_id: The GA account Id to which the data upload belongs.
:type account_id: str
:param web_property_id: UA-string associated with the upload.
:type web_property_id: str
:param custom_data_source_id: Custom Data Source Id to which this data import belongs.
:type custom_data_source_id: str
:param resumable_upload: flag to upload the file in a resumable fashion, using a
series of at least two requests.
:type resumable_upload: bool
"""
media = MediaFileUpload(
file_location,
mimetype="application/octet-stream",
resumable=resumable_upload,
)
self.log.info(
"Uploading file to GA file for accountId: %s, webPropertyId:%s and customDataSourceId:%s ",
account_id,
web_property_id,
custom_data_source_id,
)
self.get_conn().management().uploads().uploadData( # pylint: disable=no-member
accountId=account_id,
webPropertyId=web_property_id,
customDataSourceId=custom_data_source_id,
media_body=media,
).execute()
def delete_upload_data(
self,
account_id: str,
web_property_id: str,
custom_data_source_id: str,
delete_request_body: Dict[str, Any],
) -> None:
"""
Deletes the uploaded data for a given account/property/dataset
:param account_id: The GA account Id to which the data upload belongs.
:type account_id: str
:param web_property_id: UA-string associated with the upload.
:type web_property_id: str
:param custom_data_source_id: Custom Data Source Id to which this data import belongs.
:type custom_data_source_id: str
:param delete_request_body: Dict of customDataImportUids to delete.
:type delete_request_body: dict
"""
self.log.info(
"Deleting previous uploads to GA file for accountId:%s, "
"webPropertyId:%s and customDataSourceId:%s ",
account_id,
web_property_id,
custom_data_source_id,
)
self.get_conn().management().uploads().deleteUploadData( # pylint: disable=no-member
accountId=account_id,
webPropertyId=web_property_id,
customDataSourceId=custom_data_source_id,
body=delete_request_body,
).execute()
def list_uploads(
self, account_id, web_property_id, custom_data_source_id
) -> List[Dict[str, Any]]:
"""
Gets the list of data uploads from GA
:param account_id: The GA account Id to which the data upload belongs.
:type account_id: str
:param web_property_id: UA-string associated with the upload.
:type web_property_id: str
:param custom_data_source_id: Custom Data Source Id to which this data import belongs.
:type custom_data_source_id: str
"""
self.log.info(
"Getting list of uploads for accountId:%s, webPropertyId:%s and customDataSourceId:%s ",
account_id,
web_property_id,
custom_data_source_id,
)
uploads = self.get_conn().management().uploads() # pylint: disable=no-member
list_args = {
"accountId": account_id,
"webPropertyId": web_property_id,
"customDataSourceId": custom_data_source_id,
}
result = self._paginate(uploads, list_args)
return result
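# Usage sketch (illustrative; assumes an Airflow Google connection named
# "google_cloud_default" is configured):
#
# hook = GoogleAnalyticsHook(api_version="v3", gcp_conn_id="google_cloud_default")
# for account in hook.list_accounts():
# print(account.get("id"))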
|
py | b41244169b99cf11768ddc4ef3e62bac28f4d706 | import numpy as np
from dyneusr import DyNeuGraph
from dyneusr.datasets import make_trefoil
from kmapper import KeplerMapper
from sklearn.decomposition import PCA
# Generate synthetic dataset
import tadasets
X = tadasets.sphere(n=500, r=1)
# Sort by first column
inds = np.argsort(X[:, 0])
X = X[inds].copy()
y = np.arange(X.shape[0])
# Generate shape graph using KeplerMapper
mapper = KeplerMapper(verbose=1)
lens = mapper.fit_transform(X, projection=PCA(2))
graph = mapper.map(lens, X, nr_cubes=6, overlap_perc=0.5)
# Visualize the shape graph using DyNeuSR's DyNeuGraph
dG = DyNeuGraph(G=graph, y=y)
dG.visualize('dyneusr4D_sphere.html', template='4D', static=True, show=True)
|
py | b41244214be89bf8351f4dab0bdfdef5b68a500d | #!/usr/bin/python3
import simerr
def Collatz(n):
# Count the Collatz steps needed to reach 1; simerr may inject a
# simulated failure, signalled by returning -1.
cnt = 0
if(simerr.simerror("Collatz", 8)):
return -1
while(n > 1):
cnt = cnt + 1
if(n % 2):
n = n*3 + 1
else:
n //= 2  # integer division keeps n an int
return cnt
def retry():
if(simerr.simerror("retry",5)):
return -1
ret=Collatz(500)
while(ret<0):
simerr.simfix("If at first you don't succeed, try again.")
ret=Collatz(500)
return ret
simerr.siminit("teamjoe.log")
print("Collatz conjecture at 500 satisfied after step {0}\n".format(retry()))
simerr.simdone()
|
py | b41244ba0f0d3b3a45d1a0adbadc2bd3b67202a2 | '''Tests for numpy.distutils.build_ext.'''
import os
import subprocess
import sys
from textwrap import indent, dedent
import pytest
@pytest.mark.slow
def test_multi_fortran_libs_link(tmp_path):
'''
Ensures multiple "fake" static libraries are correctly linked.
see gh-18295
'''
# We need to make sure we actually have an f77 compiler.
# This is nontrivial, so we'll borrow the utilities
# from f2py tests:
from numpy.f2py.tests.util import has_f77_compiler
if not has_f77_compiler():
pytest.skip('No F77 compiler found')
# make some dummy sources
with open(tmp_path / '_dummy1.f', 'w') as fid:
fid.write(indent(dedent('''\
FUNCTION dummy_one()
RETURN
END FUNCTION'''), prefix=' '*6))
with open(tmp_path / '_dummy2.f', 'w') as fid:
fid.write(indent(dedent('''\
FUNCTION dummy_two()
RETURN
END FUNCTION'''), prefix=' '*6))
with open(tmp_path / '_dummy.c', 'w') as fid:
# doesn't need to load - just needs to exist
fid.write('int PyInit_dummyext;')
# make a setup file
with open(tmp_path / 'setup.py', 'w') as fid:
srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..')
fid.write(dedent(f'''\
def configuration(parent_package="", top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration("", parent_package, top_path)
config.add_library("dummy1", sources=["_dummy1.f"])
config.add_library("dummy2", sources=["_dummy2.f"])
config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"])
return config
if __name__ == "__main__":
import sys
sys.path.insert(0, r"{srctree}")
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())'''))
# build the test extension and "install" into a temporary directory
build_dir = tmp_path
subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
'--prefix', str(tmp_path / 'installdir'),
'--record', str(tmp_path / 'tmp_install_log.txt'),
],
cwd=str(build_dir),
)
# get the path to the so
so = None
with open(tmp_path / 'tmp_install_log.txt') as fid:
for line in fid:
if 'dummyext' in line:
so = line.strip()
break
assert so is not None
|
py | b412453b21a631bb02500cb00c839fc2cf5103eb | # This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
"""
.. _dbus-shell:
Clacks D-Bus Shell plugin
^^^^^^^^^^^^^^^^^^^^^^^^^
The clacks-dbus shell plugin allows to execute shell scripts
on the client side with root privileges.
Scripts can be executed like this:
>>> clacksh
>>> Suche Dienstanbieter...
>>> ...
>>> clientDispatch("49cb1287-db4b-4ddf-bc28-5f4743eac594", "dbus_shell_list")
>>> [u'script1.sh', u'test.py']
>>> clientDispatch("49cb1287-db4b-4ddf-bc28-5f4743eac594", "dbus_shell_exec", "script1.sh", ['param1', 'param2'])
>>> {u'code': 0, u'stderr': u'', u'stdout': u'result'}
Creating scripts
^^^^^^^^^^^^^^^^
Create a new executable file in ``/etc/clacks/shell.d`` and ensure that its base name
(without the file extension) matches the expression ``^[a-zA-Z][a-zA-Z0-9]*$``, as
enforced by the handler below.
The script can be written in any programming language you want; it just has to be executable
and has to respond to the parameter '--signature', see below.
The parameter --signature
.........................
Each script has to return a signature when it is called with the parameter '--signature'.
This is required to register the method with the clacks-dbus process.
A signature is a json string describing what is required and what is returned by the
script. See `dbus-python tutorial from freedesktop.org <http://dbus.freedesktop.org/doc/dbus-python/doc/tutorial.html>`_
for details on signatures.
Usually you'll pass strings to the script and it will return a string again:
>>> {"in": [{"param1": "s"},{"param2": "s"}], "out": "s"}
Example script
..............
Here is an example script:
>>> #!/bin/bash
>>> detail="-1"
>>> dir=$HOME
>>>
>>> usage() {
>>> echo $(basename $0) [--detail] [--directory DIR]
>>> exit 0
>>> }
>>>
>>> set -- `getopt -n$0 -u -a --longoptions="signature detail directory:" "h" "$@"` || usage
>>> [ $# -eq 0 ] && usage
>>>
>>> while [ $# -gt 0 ]
>>> do
>>> case "$1" in
>>> --signature)
>>> echo '{"in": [{"detail": "b"},{"directory": "s"}], "out": "s"}'
>>> exit 0
>>> ;;
>>> --detail)
>>> detail="-la"
>>> ;;
>>> --directory)
>>> dir=$2
>>> shift
>>> ;;
>>> -h) usage;;
>>> --) shift;break;;
>>> -*) usage;;
>>> *) break;;
>>> esac
>>> shift
>>> done
>>> ls $detail $dir
"""
import re
import os
import dbus.service
import logging
import inspect
from clacks.dbus.plugins.shell.shelldnotifier import ShellDNotifier
from subprocess import Popen, PIPE
from clacks.common import Environment
from clacks.common.components import Plugin
from clacks.dbus import get_system_bus
from json import loads
from dbus import validate_interface_name
from threading import Timer
class DBusShellException(Exception):
"""
Exception thrown for generic errors
"""
pass
class NoSuchScriptException(DBusShellException):
"""
Exception thrown for unknown scripts
"""
pass
class DBusShellHandler(dbus.service.Object, Plugin):
"""
The DBus shell handler exports shell scripts to the DBus.
Scripts placed in '/etc/clacks/shell.d' can then be executed using the
'shell_exec()' method.
Exported scripts can be listed using the 'shell_list()' method.
e.g.
print proxy.clientDispatch("<clientUUID>", "dbus_shell_exec", "myScript.sh", [])
(The 'dbus_' prefix in the above example was added by the clacks-client dbus-proxy
plugin to mark exported dbus methods - See clacks-client proxy plugin for details)
"""
# The path were scripts were read from.
script_path = None
log = None
file_regex = "^[a-zA-Z][a-zA-Z0-9]*$"
conn = None
# Time instance that helps us preventing event flooding
time_obj = None
time_int = 3
def __init__(self):
self.scripts = {}
# Connect to D-Bus service
conn = get_system_bus()
self.conn = conn
dbus.service.Object.__init__(self, conn, '/org/clacks/shell')
# Initialize paths and logging
self.log = logging.getLogger(__name__)
self.env = Environment.getInstance()
self.script_path = self.env.config.get("dbus.script-path", "/etc/clacks/shell.d").strip("'\"")
# Start notifier for file changes in /etc/clacks/shell.d
try:
ShellDNotifier(self.script_path, self.__notifier_callback)
# Intitially load all signatures
self.__notifier_callback()
except Exception:
self.log.error("failed to start monitoring of '%s'" % (self.script_path))
@dbus.service.signal('org.clacks', signature='s')
def _signatureChanged(self, filename):
"""
Sends a signal named '_signatureChanged' on the dbus. It can then be received
by other processes like the clacks-client.
"""
pass
def __notifier_callback(self, fullpath=None):
"""
This method reads the scripts found in the 'dbus.script-path' and
exports them as callable dbus-methods.
"""
# Check if we have the required permissions to access the shell.d directory
if not os.path.exists(self.script_path):
self.log.debug("the script path '%s' does not exists! " % (self.script_path,))
else:
# If no path or file is given reload all signatures
if fullpath is None:
fullpath = self.script_path
# Collect files to look for recursively
if os.path.isdir(fullpath):
files = map(lambda x: os.path.join(self.script_path, x), os.listdir(fullpath))
else:
files = [fullpath]
# Check each file if it matches the naming rules
for filename in files:
self._reload_signature(filename)
# Send some logging
self.log.info("found %s scripts to be registered" % (len(self.scripts.keys())))
for script in self.scripts.keys():
self.log.debug("registered script: %s" % script)
# Now send an event that indicates that the signature has changed.
# But wait a given amount of time, to see if more events will follow
# to avoid flooding the dbus with events.
# Cancel running jobs
if self.time_obj:
self.time_obj.cancel()
# Inititate a new job.
self.time_obj = Timer(self.time_int, self._signatureChanged, [""])
self.time_obj.start()
# Reload the list of registered methods
self.__reload_dbus_methods()
def _reload_signature(self, filepath=None):
"""
Reloads the signature for the given shell script.
"""
# We cannot register dbus methods containing '.', so strip the file extension.
filename = os.path.basename(filepath)
dbus_func_name = os.path.splitext(filename)[0]
# Perform some checks
if not re.match(self.file_regex, dbus_func_name):
self.log.debug("skipped event for '%s' it does not match the required naming conditions" % (filename,))
# Check if the file was removed or changed.
elif not os.path.exists(filepath) and dbus_func_name in self.scripts:
## UNREGISTER Shell Script
del(self.scripts[dbus_func_name])
self.log.debug("unregistered D-Bus shell script '%s' (%s)" % (dbus_func_name, filename,))
try:
method = getattr(self, dbus_func_name)
self.unregister_dbus_method(method)
except:
raise
elif not os.path.isfile(filepath):
self.log.debug("skipped event for '%s' its not a file" % (filename,))
elif not os.access(filepath, os.X_OK):
self.log.debug("skipped event for '%s' its not an executable file" % (filename,))
else:
## REGISTER Shell Script
# Parse the script and, if this was successful, add it to the list of known ones.
data = self._parse_shell_script(filepath)
if data:
self.scripts[dbus_func_name] = data
self.log.debug("registered D-Bus shell script '%s' signatures is: %s" % (data[0], data[1]))
# Dynamically register dbus methods here
def f(self, *args):
args = [data[2]] + map(lambda x: str(x), args)
# Call the script with the given arguments
scall = Popen(args, stdout=PIPE, stderr=PIPE)
scall.wait()
return (scall.returncode, scall.stdout.read(), scall.stderr.read())
# Dynamically change the functions name and then register
# it as instance method to ourselves
setattr(f, '__name__', dbus_func_name)
setattr(self.__class__, dbus_func_name, f)
self.register_dbus_method(f, 'org.clacks', in_sig=data[1]['in'], out_sig='vvv')
def _parse_shell_script(self, path):
"""
This method executes the given script (path) with the parameter
'--signature' to receive the script's signature.
It returns a tuple containing all found arguments and their types.
"""
# Call the script with the --signature parameter
try:
scall = Popen([path, '--signature'], stdout=PIPE, stderr=PIPE)
scall.wait()
except OSError as error:
self.log.info("failed to read signature from D-Bus shell script '%s' (%s) " % (path, str(error)))
return
# Check returncode of the script call.
if scall.returncode != 0:
self.log.info("failed to read signature from D-Bus shell script '%s' (%s) " % (path, scall.stderr.read()))
# Check if we can read the returned signature.
sig = {}
try:
# Signature was readable, now check if we got everything we need
sig = loads(scall.stdout.read())
if not(('in' in sig and type(sig['in']) == list) or 'in' not in sig):
self.log.debug("failed to undertand in-signature of D-Bus shell script '%s'" % (path))
elif 'out' not in sig or type(sig['out']) not in [str, unicode]:
self.log.debug("failed to undertand out-signature of D-Bus shell script '%s'" % (path))
else:
return (os.path.basename(path), sig, path)
except ValueError:
self.log.debug("failed to undertand signature of D-Bus shell script '%s'" % (path))
return None
@dbus.service.method('org.clacks', in_signature='', out_signature='av')
def shell_list(self):
"""
Returns all availabe scripts and their signatures.
"""
return self.scripts
@dbus.service.method('org.clacks', in_signature='sas', out_signature='a{sv}')
def shell_exec(self, action, args):
"""
Executes a shell command and returns the result with its return code
stderr and stdout strings.
"""
# Check if the given script exists
if action not in self.scripts:
raise NoSuchScriptException("unknown service %s" % action)
cmd = self.scripts[action][2]
# Execute the script and return the results
args = map(lambda x: str(x), [os.path.join(self.script_path, cmd)] + args)
res = Popen(args, stdout=PIPE, stderr=PIPE)
res.wait()
return ({'code': res.returncode,
'stdout': res.stdout.read(),
'stderr': res.stderr.read()})
def register_dbus_method(self, func, dbus_interface, in_sig, out_sig):
"""
Marks the given method as exported to the dbus.
"""
# Validate the given DBus interface
validate_interface_name(dbus_interface)
# Dynamically create argument list
args = []
out_signature = out_sig
in_signature = ""
for entry in in_sig:
args.append(entry.keys()[0])
in_signature += entry.values()[0]
# Set DBus specific properties
func._dbus_is_method = True
func._dbus_async_callbacks = None
func._dbus_interface = dbus_interface
func._dbus_in_signature = in_signature
func._dbus_out_signature = out_signature
func._dbus_sender_keyword = None
func._dbus_path_keyword = None
func._dbus_rel_path_keyword = None
func._dbus_destination_keyword = None
func._dbus_message_keyword = None
func._dbus_connection_keyword = None
func._dbus_args = args
func._dbus_get_args_options = {'byte_arrays': False,
'utf8_strings': False}
def unregister_dbus_method(self, method):
"""
Unmarks the given method as exported to the dbus.
"""
# Extract the function and its parameters
func = method.__func__
func._dbus_is_method = None
func._dbus_async_callbacks = None
func._dbus_interface = None
func._dbus_in_signature = None
func._dbus_out_signature = None
def __reload_dbus_methods(self):
"""
Reloads the list of exported dbus methods.
This should be called whenever a method has been registered on or
unregistered from the dbus.
"""
# Manually reload the list of registered methods.
# Reset list first
cname = self.__module__ + "." + self.__class__.__name__
old_list = self._dbus_class_table[cname]['org.clacks']
try:
# Reload list
for func in inspect.getmembers(self, predicate=inspect.ismethod):
if getattr(func[1].__func__, '_dbus_interface', False):
self._dbus_class_table[cname]['org.clacks'][func[0]] = func[1].__func__
# Restore the old method list if something goes wrong
except Exception as error:
self._dbus_class_table[cname]['org.clacks'] = old_list
raise DBusShellException("failed to manually register dbus method: %s" % (str(error),))
|
py | b412457a9026a1afa648fc1030db2440579482a6 | import json
import random
def is_bad_name_format(name):
l = name['lang']
return all(l.get(k) is None for k in ('jp', 'en', 'zh', 'kr'))
def map_name(name):
lang = {}
if jp := name.get('jp'):
lang['jp'] = jp
if en := name.get('en'):
lang['en'] = en
if zh := name.get('zh'):
lang['zh'] = zh
if kr := name.get('kr'):
lang['kr'] = kr
return {'lang': lang, "default": name.get('default')}
def main():
with open('src/data/vdb.json', 'r', encoding='utf-8') as f:
vdb = json.load(f)
vtbs = [x for x in vdb['vtbs'] if x['type'] == 'vtuber']
groups = [x for x in vdb['vtbs'] if x['type'] == 'group']
new_vtbs = []
new_groups = []
for vtb in vtbs:
new_name = map_name(vtb['name'])
if is_bad_name_format(new_name):
continue
new = {
'uuid': vtb['uuid'],
'name': new_name,
'subscribed': random.randint(0, 1) == 1,
}
if group := vtb.get('group'):
new['group'] = group
new_vtbs.append(new)
for group in groups:
new_name = map_name(group['name'])
if is_bad_name_format(new_name):
continue
new_groups.append({
'uuid': group['uuid'],
'name': new_name,
})
count = random.randrange(50, 150)
print(f"{count} vtbs")
print(f"{len(new_groups)} groups")
with open('src/data/vdb.new.json', 'w', encoding='utf-8') as f:
json.dump(
{
'vtbs': random.sample(new_vtbs, k=count),
'groups': new_groups
},
f,
ensure_ascii=False,
)
if __name__ == '__main__':
main()
|
py | b41245dbb5f856319049b29cab25dda9d4c1e664 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workflow for Wikidata and Wikipedia processing"""
import sling.flags as flags
from sling.task import *
import sling.task.corpora as corpora
flags.define("--index",
help="index wiki data sets",
default=False,
action='store_true')
flags.define("--only_primary_language",
help="only use wikidata labels from primary language",
default=False,
action='store_true')
flags.define("--only_known_languages",
help="only use wikidata labels from known languages",
default=False,
action='store_true')
flags.define("--skip_wikipedia_mapping",
help="skip wikipedia mapping step",
default=False,
action='store_true')
flags.define("--extra_items",
help="additional items with info",
default=None,
metavar="RECFILES")
flags.define("--lbzip2",
help="use lbzip2 for parallel decompression",
default=False,
action='store_true')
class WikiWorkflow:
def __init__(self, name=None, wf=None):
if wf == None: wf = Workflow(name)
self.wf = wf
#---------------------------------------------------------------------------
# Wikidata
#---------------------------------------------------------------------------
def wikidata_dump(self):
"""Resource for wikidata dump. This can be downloaded from wikimedia.org
and contains a full dump of Wikidata in JSON format."""
return self.wf.resource(corpora.wikidata_dump(), format="text/json")
def wikidata_items(self):
"""Resource for wikidata items. This is a set of record files where each
WikiData item is represented as a frame:
<qid>: {
=<qid>
:/w/item
name: "..."
description: "..."
alias: {
name: "..."
lang: /lang/<lang>
sources: ...
}
...
/w/wikipedia: {
/lang/<lang>: <wid>
...
}
... properties
}
<qid>: Wikidata item id (Q<item number>, e.g. Q35)
<pid>: Wikidata property id (P<property number>, e.g. P31)
<wid>: Wikipedia page id (/wp/<lang>/<pageid>, /wp/en/76972)
"""
return self.wf.resource("[email protected]",
dir=corpora.wikidir(),
format="records/frame")
def wikidata_properties(self):
"""Resource for wikidata properties. This is a record file where each
Wikidata property is represented as a frame.
<pid>: {
=<pid>
:/w/property
name: "..."
description: "..."
/w/datatype: ...
... properties ...
}
"""
return self.wf.resource("properties.rec",
dir=corpora.wikidir(),
format="records/frame")
def wikidata_import(self, input, name=None):
"""Task for converting Wikidata JSON to SLING items and properties."""
task = self.wf.task("wikidata-importer", name=name)
task.add_param("primary_language", flags.arg.language)
task.add_param("only_primary_language", flags.arg.only_primary_language)
task.add_param("only_known_languages", flags.arg.only_known_languages)
self.wf.connect(input, task)
items = self.wf.channel(task, name="items", format="message/frame")
properties = self.wf.channel(task, name="properties",
format="message/frame")
return items, properties
def wikidata(self, dump=None):
"""Import Wikidata dump to frame format. It takes a Wikidata dump in JSON
format as input and converts each item and property to a SLING frame.
Returns the item and property output files."""
if dump == None: dump = self.wikidata_dump()
with self.wf.namespace("wikidata"):
if flags.arg.lbzip2:
input = self.wf.pipe("lbzip2 -d -c " + dump.name,
name="wiki-decompress",
format="text/json")
else:
input = self.wf.read(dump)
input = self.wf.parallel(input, threads=5)
items, properties = self.wikidata_import(input)
items_output = self.wikidata_items()
self.wf.write(items, items_output, name="item-writer")
properties_output = self.wikidata_properties()
self.wf.write(properties, properties_output, name="property-writer")
return items_output, properties_output
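# Usage sketch (illustrative; SLING's driver scripts normally construct and
# run the workflow, with dump locations taken from the corpora config):
#
# wiki = WikiWorkflow(name="wikidata")
# items, properties = wiki.wikidata()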
#---------------------------------------------------------------------------
# Wikipedia
#---------------------------------------------------------------------------
def wikipedia_dump(self, language=None):
"""Resource for wikipedia dump. This can be downloaded from wikimedia.org
and contains a full dump of Wikipedia in a particular language. This is
in XML format with the articles in Wiki markup format."""
if language == None: language = flags.arg.language
return self.wf.resource(corpora.wikipedia_dump(language),
format="xml/wikipage")
def wikipedia_articles(self, language=None):
"""Resource for wikipedia articles. This is a set of record files where each
Wikipedia article is encoded as a SLING document.
<wid>: {
=<wid>
:/wp/page
/wp/page/pageid: ...
/wp/page/title: "..."
lang: /lang/<lang>
/wp/page/text: "<Wikipedia page in Wiki markup format>"
}
"""
if language == None: language = flags.arg.language
return self.wf.resource("[email protected]",
dir=corpora.wikidir(language),
format="records/frame")
def wikipedia_categories(self, language=None):
"""Resource for wikipedia categories. This is a set of record files where
each Wikipedia article is encoded as a SLING document.
"""
if language == None: language = flags.arg.language
return self.wf.resource("[email protected]",
dir=corpora.wikidir(language),
format="records/frame")
def wikipedia_redirects(self, language=None):
"""Resource for wikidata redirects. This is encoded as a SLING frame store
where each redirect is a SLING frame.
{
=<wid for redirect page>
:/wp/redirect
/wp/redirect/pageid: ...
/wp/redirect/title: "..."
/wp/redirect/link: <wid for target page>
}
"""
if language == None: language = flags.arg.language
return self.wf.resource("redirects.sling",
dir=corpora.wikidir(language),
format="store/frame")
def wikipedia_mapping(self, language=None):
"""Resource for wikipedia to wikidata mapping. This is a SLING frame store
with one frame per Wikipedia article with information for mapping it to
Wikidata.
{
=<wid>
/w/item/qid: <qid>
/w/item/kind: /w/item/kind/...
}
"""
if language == None: language = flags.arg.language
return self.wf.resource("mapping.sling",
dir=corpora.wikidir(language),
format="store/frame")
def wikipedia_documents(self, language=None):
"""Resource for parsed Wikipedia documents. This is a set of record files
with one record per article, where the text has been extracted from the
wiki markup and tokenized. The documents also contain additional
structured information (e.g. categories) and mentions for links to other
Wikipedia pages:
<wid>: {
=<wid>
:/wp/page
/wp/page/pageid: ...
/wp/page/title: "..."
lang: /lang/<lang>
/wp/page/text: "<Wikipedia page in wiki markup format>"
/wp/page/qid: <qid>
:document
url: "http://<lang>.wikipedia.org/wiki/<name>"
title: "..."
text: "<clear text extracted from wiki markup>"
tokens: [...]
mention: {
:/wp/link
begin: ...
length: ...
evokes: <qid>
}
...
/wp/page/category: <qid>
...
}
"""
if language == None: language = flags.arg.language
return self.wf.resource("[email protected]",
dir=corpora.wikidir(language),
format="records/document")
def wikipedia_category_documents(self, language=None):
"""Resource for parsed Wikipedia category documents.
"""
if language == None: language = flags.arg.language
return self.wf.resource("[email protected]",
dir=corpora.wikidir(language),
format="records/document")
def wikipedia_aliases(self, language=None):
"""Resource for wikipedia aliases. The aliases are extracted from the
Wikipedia pages from anchors, redirects, disambiguation pages, etc. This is
a set of record files with a SLING frame record for each item:
<qid>: {
alias: {
name: "<alias>"
lang: /lang/<lang>
sources: ...
count: ...
}
...
}
"""
if language == None: language = flags.arg.language
return self.wf.resource("[email protected]",
dir=corpora.wikidir(language),
format="records/alias")
def language_defs(self):
"""Resource for language definitions. This defines the /lang/<lang>
symbols and has meta information for each language."""
return self.wf.resource("languages.sling",
dir=corpora.repository("data/wiki"),
format="store/frame")
def template_defs(self, language=None):
"""Resource for template definitions."""
if language == None: language = flags.arg.language
return self.wf.resource("templates.sling",
dir=corpora.repository("data/wiki/" + language),
format="store/frame")
def wikipedia_import(self, input, name=None):
"""Task for converting Wikipedia dump to SLING articles and redirects.
Returns article, categories, and redirect channels."""
task = self.wf.task("wikipedia-importer", name=name)
task.attach_input("input", input)
articles = self.wf.channel(task, name="articles", format="message/frame")
categories = self.wf.channel(task, name="categories",
format="message/frame")
redirects = self.wf.channel(task, name="redirects", format="message/frame")
return articles, categories, redirects
def wikipedia(self, dump=None, language=None):
"""Convert Wikipedia dump to SLING articles and store them in a set of
record files. Returns output resources for articles and redirects."""
if language == None: language = flags.arg.language
if dump == None: dump = self.wikipedia_dump(language)
with self.wf.namespace(language + "-wikipedia"):
# Import Wikipedia dump and convert to SLING format.
articles, categories, redirects = self.wikipedia_import(dump)
# Write articles.
articles_output = self.wikipedia_articles(language)
self.wf.write(articles, articles_output, name="article-writer")
# Write categories.
categories_output = self.wikipedia_categories(language)
self.wf.write(categories, categories_output, name="category-writer")
# Write redirects.
redirects_output = self.wikipedia_redirects(language)
self.wf.write(redirects, redirects_output, name="redirect-writer")
return articles_output, categories_output, redirects_output
def wikimap(self, wikidata_items=None, language=None, name=None):
"""Task for building mapping from Wikipedia IDs (<wid>) to Wikidata
IDs (<qid>). Returns file with frame store for mapping."""
if language == None: language = flags.arg.language
if wikidata_items == None: wikidata_items = self.wikidata_items()
wiki_mapping = self.wf.map(wikidata_items, "wikipedia-mapping",
params={"language": language},
name=name)
output = self.wikipedia_mapping(language)
self.wf.write(wiki_mapping, output, name="mapping-writer")
return output
def parse_wikipedia_articles(self,
articles=None,
categories=None,
redirects=None,
commons=None,
wikimap=None,
language=None):
"""Task for parsing Wikipedia articles to SLING documents and aliases.
Returns channels for documents and aliases."""
if language is None: language = flags.arg.language
if articles is None: articles = self.wikipedia_articles(language)
if categories is None: categories = self.wikipedia_categories(language)
if redirects is None: redirects = self.wikipedia_redirects(language)
if commons is None:
commons = [
self.language_defs(),
self.template_defs(language),
self.unit_defs(),
self.calendar_defs(),
self.country_defs(),
]
if wikimap is None: wikimap = self.wikipedia_mapping(language)
parser = self.wf.task("wikipedia-document-builder", "wikipedia-documents")
parser.add_param("language", language)
parser.add_param("skip_tables", True)
self.wf.connect(self.wf.read(articles, name="article-reader"), parser)
self.wf.connect(self.wf.read(categories, name="category-reader"), parser)
parser.attach_input("commons", commons)
parser.attach_input("wikimap", wikimap)
parser.attach_input("redirects", redirects)
documents = self.wf.channel(parser, format="message/document")
aliases = self.wf.channel(parser, "aliases", format="message/qid:alias")
catdocs = self.wf.channel(parser, "categories", format="message/qid:alias")
return documents, aliases, catdocs
def parse_wikipedia(self, language=None):
"""Parse Wikipedia articles and build alias table."""
if language is None: language = flags.arg.language
with self.wf.namespace(language + "-wikipedia"):
with self.wf.namespace("mapping"):
# Build mapping from Wikipedia IDs to Wikidata IDs.
if not flags.arg.skip_wikipedia_mapping:
self.wikimap(language=language)
with self.wf.namespace("parsing"):
# Parse Wikipedia articles to SLING documents.
documents, aliases, catdocs = \
self.parse_wikipedia_articles(language=language)
# Write Wikipedia documents.
document_output = self.wikipedia_documents(language)
self.wf.write(documents, document_output, name="document-writer",
params={"indexed": flags.arg.index})
# Write Wikipedia category documents.
category_document_output = self.wikipedia_category_documents(language)
self.wf.write(catdocs, category_document_output, name="document-writer",
params={"indexed": flags.arg.index})
with self.wf.namespace("aliases"):
# Collect aliases.
alias_output = self.wikipedia_aliases(language)
self.wf.reduce(self.wf.shuffle(aliases, len(alias_output)),
alias_output,
"wikipedia-alias-reducer",
params={'language': language})
return document_output, alias_output
#---------------------------------------------------------------------------
# Wikipedia items
#---------------------------------------------------------------------------
def wikipedia_items(self):
"""Resource for item data from Wikipedia . This merges the item categories
from all Wikipedias.
"""
return self.wf.resource("wikipedia-items.rec",
dir=corpora.wikidir(),
format="records/frame")
def wikipedia_members(self):
"""Resource for members of categories.
"""
return self.wf.resource("wikipedia-members.rec",
dir=corpora.wikidir(),
format="records/frame")
def merge_wikipedia_categories(self, languages=None):
"""Merge Wikipedia categories for all languages."""
if languages is None: languages = flags.arg.languages
with self.wf.namespace("wikipedia-categories"):
documents = []
for language in languages:
documents.extend(self.wikipedia_documents(language))
documents.extend(self.wikipedia_category_documents(language))
return self.wf.mapreduce(input=documents,
output=self.wikipedia_items(),
mapper="category-item-extractor",
reducer="category-item-merger",
format="message/frame")
def invert_wikipedia_categories(self, languages=None):
"""Invert category membership."""
if languages is None: languages = flags.arg.languages
with self.wf.namespace("wikipedia-members"):
return self.wf.mapreduce(input=self.wikipedia_items(),
output=self.wikipedia_members(),
mapper="category-inverter",
reducer="category-member-merger",
format="message/string",
params={"threshold": 100000})
#---------------------------------------------------------------------------
# Wikipedia link graph
#---------------------------------------------------------------------------
def wikilinks(self):
"""Resource for link graph."""
return self.wf.resource("[email protected]",
dir=corpora.wikidir(),
format="records/frame")
def fanin(self):
"""Resource for link fan-in."""
return self.wf.resource("fanin.rec",
dir=corpora.wikidir(),
format="records/frame")
def extract_links(self):
# Build link graph over all Wikipedias and compute item popularity.
documents = []
for l in flags.arg.languages:
documents.extend(self.wikipedia_documents(l))
# Extract links from documents.
mapper = self.wf.task("wikipedia-link-extractor")
self.wf.connect(self.wf.read(documents), mapper)
wiki_links = self.wf.channel(mapper, format="message/frame", name="output")
wiki_counts = self.wf.channel(mapper, format="message/int", name="fanin")
# Extract fact targets from items.
target_extractor = self.wf.task("fact-target-extractor")
self.wf.connect(self.wf.read(self.wikidata_items()), target_extractor)
item_counts = self.wf.channel(target_extractor, format="message/int")
# Reduce links.
with self.wf.namespace("links"):
wikilinks = self.wikilinks()
self.wf.reduce(self.wf.shuffle(wiki_links, shards=length_of(wikilinks)),
wikilinks, "wikipedia-link-merger")
# Reduce fan-in.
with self.wf.namespace("popularity"):
counts = self.wf.collect(wiki_counts, item_counts)
fanin = self.fanin()
self.wf.reduce(self.wf.shuffle(counts, shards=length_of(fanin)),
fanin, "item-popularity-reducer")
return wikilinks, fanin
#---------------------------------------------------------------------------
# Fused items
#---------------------------------------------------------------------------
def fused_items(self):
"""Resource for merged items. This is a set of record files where each
item is represented as a frame.
"""
return self.wf.resource("[email protected]",
dir=corpora.wikidir(),
format="records/frame")
def fuse_items(self, items=None, extras=None, output=None):
if items is None:
items = self.wikidata_items() + self.wikilinks() + [
self.fanin(),
self.wikipedia_items(),
self.wikipedia_members()
]
if flags.arg.extra_items:
extra = self.wf.resource(flags.arg.extra_items, format="records/frame")
if isinstance(extra, list):
items.extend(extra)
else:
items.append(extra)
if extras is not None:
if isinstance(extras, list):
items.extend(extras)
else:
items.append(extras)
if output is None: output = self.fused_items()
with self.wf.namespace("fused-items"):
return self.wf.mapreduce(input=items,
output=output,
mapper=None,
reducer="item-merger",
format="message/frame",
params={"indexed": flags.arg.index})
#---------------------------------------------------------------------------
# Knowledge base
#---------------------------------------------------------------------------
def calendar_defs(self):
"""Resource for calendar definitions."""
return self.wf.resource("calendar.sling",
dir=corpora.repository("data/wiki"),
format="store/frame")
def country_defs(self):
"""Resource for country definitions."""
return self.wf.resource("countries.sling",
dir=corpora.repository("data/wiki"),
format="store/frame")
def unit_defs(self):
"""Resource for calendar definitions."""
return self.wf.resource("units.sling",
dir=corpora.repository("data/wiki"),
format="store/frame")
def wikidata_defs(self):
"""Resource for Wikidata schema definitions."""
return self.wf.resource("wikidata.sling",
dir=corpora.repository("data/wiki"),
format="store/frame")
def wikipedia_defs(self):
"""Resource for Wikipedia schema definitions."""
return self.wf.resource("wikipedia.sling",
dir=corpora.repository("data/wiki"),
format="store/frame")
def knowledge_base(self):
"""Resource for knowledge base. This is a SLING frame store with frames for
each Wikidata item and property plus additional schema information.
"""
return self.wf.resource("kb.sling",
dir=corpora.wikidir(),
format="store/frame")
def schema_defs(self):
"""Resources for schemas included in knowledge base."""
return [
self.language_defs(),
self.calendar_defs(),
self.country_defs(),
self.unit_defs(),
self.wikidata_defs(),
self.wikipedia_defs()
]
def build_knowledge_base(self,
items=None,
properties=None,
schemas=None):
"""Task for building knowledge base store with items, properties, and
schemas."""
if items is None: items = self.fused_items()
if properties is None: properties = self.wikidata_properties()
if schemas is None: schemas = self.schema_defs()
with self.wf.namespace("wikidata"):
# Prune information from Wikidata items.
pruned_items = self.wf.map(items, "wikidata-pruner",
params={"prune_aliases": True,
"prune_wiki_links": True,
"prune_category_members": True})
# Collect property catalog.
property_catalog = self.wf.map(properties, "wikidata-property-collector")
# Collect frames into knowledge base store.
parts = self.wf.collect(pruned_items, property_catalog, schemas)
return self.wf.write(parts, self.knowledge_base(),
params={"snapshot": True})
#---------------------------------------------------------------------------
# Item names
#---------------------------------------------------------------------------
def item_names(self, language=None):
"""Resource for item names in language. This is a set of record files with
one SLING frame per item.
<qid>: {
alias: {
name: "<alias>"
lang: /lang/<lang>
sources: ...
count: ...
form: ...
}
...
}
"""
if language is None: language = flags.arg.language
return self.wf.resource("[email protected]",
dir=corpora.wikidir(language),
format="records/alias")
def alias_corrections(self):
"""Resource for alias corrections."""
return self.wf.resource("aliases.sling",
dir=corpora.repository("data/wiki"),
format="store/frame")
def extract_names(self, aliases=None, language=None):
"Task for selecting language-dependent names for items."""
if language is None: language = flags.arg.language
if aliases is None:
# Get language-dependent aliases from Wikidata and Wikipedia.
wikidata_aliases = self.wf.map(self.fused_items(),
"alias-extractor",
params={
"language": language,
"skip_aux": True,
},
format="message/alias",
name="wikidata-alias-extractor")
wikipedia_aliases = self.wf.read(self.wikipedia_aliases(language),
name="wikipedia-alias-reader")
aliases = wikipedia_aliases + [wikidata_aliases]
# Merge alias sources.
names = self.item_names(language)
merged_aliases = self.wf.shuffle(aliases, len(names))
# Filter and select aliases.
selector = self.wf.reduce(merged_aliases, names, "alias-reducer",
params={
"language": language,
"anchor_threshold": 30,
"min_prefix": 1,
"max_edit_distance": {
"fi": 5, "pl": 5
}.get(language, 3)
})
selector.attach_input("commons", self.alias_corrections())
return names
#---------------------------------------------------------------------------
# Name table
#---------------------------------------------------------------------------
def name_table(self, language=None):
"""Resource for item name table. This is a repository with all the names
and the items they are aliases for."""
if language is None: language = flags.arg.language
return self.wf.resource("name-table.repo",
dir=corpora.wikidir(language),
format="repository")
def build_name_table(self, names=None, language=None):
"""Build name table for all items."""
if language is None: language = flags.arg.language
if names is None: names = self.item_names(language)
with self.wf.namespace("name-table"):
builder = self.wf.task("name-table-builder")
builder.add_param("language", language)
self.wf.connect(self.wf.read(names, name="name-reader"), builder)
repo = self.name_table(language)
builder.attach_output("repository", repo)
return repo
#---------------------------------------------------------------------------
# Phrase table
#---------------------------------------------------------------------------
def phrase_table(self, language=None):
"""Resource for item name phrase table. This is a repository with phrase
fingerprints of the item names."""
if language is None: language = flags.arg.language
return self.wf.resource("phrase-table.repo",
dir=corpora.wikidir(language),
format="repository")
def build_phrase_table(self, names=None, language=None):
"""Build phrase table for all items."""
if language is None: language = flags.arg.language
if names is None: names = self.item_names(language)
with self.wf.namespace("phrase-table"):
builder = self.wf.task("phrase-table-builder")
builder.add_param("language", language)
builder.add_param("transfer_aliases", True)
self.wf.connect(self.wf.read(names, name="name-reader"), builder)
kb = self.knowledge_base()
repo = self.phrase_table(language)
builder.attach_input("commons", kb)
builder.attach_output("repository", repo)
return repo
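# A minimal usage sketch (not part of the original module; the enclosing
# class name "WikiWorkflow" and its constructor are assumptions, since the
# class definition lies outside this excerpt):
#
# flow = WikiWorkflow()
# flow.parse_wikipedia(language="en") # parse articles, build alias table
# flow.fuse_items() # merge Wikidata/Wikipedia item sources
# flow.build_knowledge_base() # write the kb.sling frame store
# flow.build_name_table(language="en") # name repository for lookups
# flow.build_phrase_table(language="en") # phrase fingerprint repository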
|
py | b412476bb6445cfc799ada64172bfca862cee39d | # Copyright 2019-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from os.path import isfile, join
from SCons.Script import Import, Return
Import("env")
board = env.BoardConfig()
platform = env.PioPlatform()
core = board.get("build.core", "")
def get_suitable_optiboot_binary(framework_dir, board_config):
mcu = board_config.get("build.mcu", "").lower()
f_cpu = board_config.get("build.f_cpu", "16000000L").upper()
uart = board_config.get("hardware.uart", "uart0").upper()
bootloader_led = board_config.get("bootloader.led_pin", "").upper()
if core == "MightyCore" and board_config.get("build.variant", "") == "bobuino":
bootloader_led = "B7"
bootloader_file = "optiboot_flash_%s_%s_%s_%s_%s.hex" % (
mcu, uart, board_config.get(
"bootloader.speed", env.subst("$UPLOAD_SPEED")), f_cpu, bootloader_led)
bootloader_path = join(framework_dir, "bootloaders", "optiboot_flash",
"bootloaders", mcu, f_cpu, bootloader_file)
if isfile(bootloader_path):
return bootloader_path
return bootloader_path.replace(".hex", "_BIGBOOT.hex")
common_cmd = [
"avrdude", "-p", "$BOARD_MCU", "-e", "-C",
'"%s"' % join(platform.get_package_dir("tool-avrdude"), "avrdude.conf"),
"-c", "$UPLOAD_PROTOCOL", "$UPLOAD_FLAGS"
]
framework_dir = ""
if env.get("PIOFRAMEWORK", []):
framework_dir = platform.get_package_dir(
platform.frameworks[env.get("PIOFRAMEWORK")[0]]['package'])
# Common for all bootloaders
lock_bits = board.get("bootloader.lock_bits", "0x0F")
unlock_bits = board.get("bootloader.unlock_bits", "0x3F")
bootloader_path = board.get("bootloader.file", "")
if core in ("MiniCore", "MegaCore", "MightyCore", "MajorCore"):
if not isfile(bootloader_path):
bootloader_path = get_suitable_optiboot_binary(framework_dir, board)
fuses_action = env.SConscript("fuses.py", exports="env")
else:
if not isfile(bootloader_path):
bootloader_path = join(
framework_dir, "bootloaders", bootloader_path)
if not board.get("bootloader", {}):
sys.stderr.write("Error: missing bootloader configuration!\n")
env.Exit(1)
lfuse = board.get("bootloader.lfuse", "")
hfuse = board.get("bootloader.hfuse", "")
efuse = board.get("bootloader.efuse", "")
if not all(f for f in (lfuse, hfuse)):
sys.stderr.write("Error: Missing bootloader fuses!\n")
env.Exit(1)
fuses_cmd = [
"-Ulock:w:%s:m" % unlock_bits,
"-Uhfuse:w:%s:m" % hfuse,
"-Ulfuse:w:%s:m" % lfuse
]
if efuse:
fuses_cmd.append("-Uefuse:w:%s:m" % efuse)
fuses_action = env.VerboseAction(
" ".join(common_cmd + fuses_cmd), "Setting fuses")
if not isfile(bootloader_path):
sys.stderr.write("Error: Couldn't find bootloader image\n")
env.Exit(1)
bootloader_flags = [
'-Uflash:w:"%s":i' % bootloader_path, "-Ulock:w:%s:m" % lock_bits]
bootloader_actions = [
fuses_action,
env.VerboseAction(" ".join(common_cmd + bootloader_flags),
"Uploading bootloader")
]
Return("bootloader_actions")
|
py | b4124819f8b7aeacbfbf4adbd4341faf4acb53f1 | from typing import Generic, TypeVar, Dict, Set, Any, Tuple, Union, Iterable
from collections import OrderedDict, defaultdict
from graphviz import Digraph, Graph
Dot = Union[Graph, Digraph]
N = TypeVar("N") # Node
class GraphVisualizer(Generic[N]):
def __init__(self):
self._nodes: Dict[N, Dict[str, Any]] = OrderedDict()
self._clusters: Dict[Any, Set[N]] = defaultdict(lambda: set())
self._edges: Dict[Tuple[N, N], Dict[str, Any]] = OrderedDict()
@property
def nodes(self) -> Iterable[N]:
return self._nodes.keys()
@property
def edges(self) -> Iterable[Tuple[N, N]]:
return self._edges.keys()
def update_node(self, node: N, cluster: Any = None, **attrs):
attrs = {k: v for k, v in attrs.items() if v is not None}
if node in self._nodes:
self._nodes[node].update(attrs)
else:
self._nodes[node] = attrs
if cluster is not None:
self._clusters[cluster].add(node)
def update_edge(self, src: N, dst: N, **attrs):
key = (src, dst)
if key in self._edges:
self._edges[key].update(attrs)
else:
self._edges[key] = attrs
def add_to(self, dot: Dot) -> Dot:
remaining = OrderedDict(self._nodes)
for cluster_key, cluster_nodes in self._clusters.items():
with dot.subgraph(name=f"cluster_{cluster_key}") as c:
# c.attr(style="filled", color="lightgrey")
for node in cluster_nodes:
if node in remaining:
c.node(str(node), **remaining.pop(node))
else:
raise ValueError(f"node {node} appeared in multiple clusters")
for node, attrs in remaining.items():
dot.node(str(node), **attrs)
for (src, dst), attrs in self._edges.items():
dot.edge(str(src), str(dst), **attrs)
return dot
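if __name__ == "__main__":
# Minimal usage sketch (not part of the original module): build a small
# graph with one cluster and print the resulting DOT source.
viz = GraphVisualizer[str]()
viz.update_node("a", cluster=0, label="start", shape="box")
viz.update_node("b", label="end")
viz.update_edge("a", "b", label="next")
dot = viz.add_to(Digraph())
print(dot.source) # dot.render("graph") would also write the rendered file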
|
py | b412497c944857f69537d8c70fb0310a14b9d999 | import binascii
import platform
import sys
from ctypes import *
from CTF_ByteIO import ByteIO
from Loader import DataLoader
split = lambda A, n=3: [A[i:i + n] for i in range(0, len(A), n)]
is_win64 = platform.architecture(executable=sys.executable, bits='', linkage='')[0] == "64bit"
if __name__ == '__main__':
_lak_lib = r"E:\PYTHON_STUFF\CTF_ReaderV2\DecryptLib\build\Release\x64\Decrypter-x64.dll"
else:
_lak_lib = "Decrypter-x64.dll" if is_win64 else "Decrypter-x86.dll"
LAKDLL = windll.LoadLibrary(_lak_lib)
class Decrypter(DataLoader):
_get_key = LAKDLL.make_key
_get_key.argtypes = [c_char_p, c_char_p, c_char_p, c_bool]
_get_key.restype = POINTER(c_ubyte * 256)
_decode_chunk = LAKDLL.decode_chunk
_decode_chunk.argtypes = [c_char_p, c_int32, c_char]
_decode_chunk.restype = POINTER(c_ubyte)
def __init__(self):
self.key = bytes(256)
def generate_key(self, title, copyright, project, unicode):
title = create_string_buffer(title.encode('ascii'))
copyright = create_string_buffer(copyright.encode('ascii'))
project = create_string_buffer(project.encode('ascii'))
a = self._get_key(title, copyright, project, unicode)
self.key = bytes(list(a.contents))
def decode(self, chunk_data, chunk_size, magic_key=54, as_reader=False):
decoded_chunk_pointer = self._decode_chunk(chunk_data, chunk_size, magic_key)
decoded_chunk_pointer = cast(decoded_chunk_pointer, POINTER(c_ubyte * chunk_size))
if as_reader:
return ByteIO(byte_object=bytes(list(decoded_chunk_pointer.contents)))
else:
return bytes(list(decoded_chunk_pointer.contents))
def print_hex(self, data):
block_size = 16
for block in split(data, block_size):
char_acc = ''
for b in block:
if 32 < b < 128:
char_acc += chr(b)
else:
char_acc += '.'
b_len = len(block)
acc_len = b_len * 2 + block_size - 1
acc = binascii.hexlify(block)
acc = acc.upper().decode('ascii')
acc = ' '.join(split(acc, 2))
if b_len == block_size:
acc_len = len(acc)
if b_len < block_size:
acc += ' ' * ((acc_len - len(acc)) * 3)
acc += '\t|\t' + char_acc
print(acc)
def decode_mode3(self, chunk_data, chunk_size, chunk_id, magic_key=54, as_reader=False):
reader = ByteIO(byte_object=chunk_data)
decompressed_size = reader.read_uint32()
chunk_data = bytearray(reader.read_bytes())
if chunk_id & 0x1:
chunk_data[0] ^= (chunk_id & 0xFF) ^ (chunk_id >> 0x8)
chunk_data = bytes(chunk_data)
data = self.decode(chunk_data, chunk_size, magic_key, True)
compressed_size = data.read_uint32()
if as_reader:
return data.decompress_block(compressed_size, decompressed_size, True)
else:
return data.decompress_block(compressed_size, decompressed_size, False)
if __name__ == '__main__':
keygen = Decrypter()
keygen.generate_key(r'Ultimate Custom Night', 'Scott Cawthon', r'C:\Users\Scott\Desktop\FNAF 6\CustomNight-151.mfa',
False)
keygen.print_hex(keygen.key)
keygen.print_hex(keygen.decode(b'\xE5\xC0\x66\x76', 4, 54))
|
py | b4124a6256aa67cbd48a186caaca49930b2a82a1 | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Endpoints to reset test data from database."""
from flask import g
from flask_restx import Namespace, Resource, cors
from auth_api import status as http_status
from auth_api.auth import jwt as _jwt
from auth_api.exceptions import BusinessException
from auth_api.services import ResetTestData as ResetService
from auth_api.tracer import Tracer
from auth_api.utils.roles import Role
from auth_api.utils.util import cors_preflight
API = Namespace('reset', description='Authentication System - Reset test data')
TRACER = Tracer.get_instance()
@cors_preflight('POST, PUT, OPTIONS')
@API.route('', methods=['POST', 'PUT', 'OPTIONS'])
class Reset(Resource):
"""Cleanup test data by the provided token."""
@staticmethod
@TRACER.trace()
@cors.crossdomain(origin='*')
@_jwt.has_one_of_roles([Role.TESTER.value])
def post():
"""Cleanup test data by the provided token."""
token = g.jwt_oidc_token_info
try:
ResetService.reset(token)
response, status = '', http_status.HTTP_204_NO_CONTENT
except BusinessException as exception:
response, status = {'code': exception.code, 'message': exception.message}, exception.status_code
return response, status
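# Example client call (illustrative; the exact URL prefix and host are
# assumptions, since they depend on how the API namespace is mounted):
#
# curl -X POST https://<auth-host>/api/v1/reset \
# -H "Authorization: Bearer <jwt-with-TESTER-role>"
#
# A 204 No Content response indicates the test data was removed.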
|
py | b4124a87754dfb32895aa733a82d70ab472e9234 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "sisock-"
cfg.versionfile_source = "sisock/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
py | b4124b06365d85bafae4c535aafed0a4d1501f65 | """
Module wraps some legacy code to construct a series of vtu files with 2D
CFD data on an unstructured mesh from a structured mesh in numpy format.
The code is not very general and likely only works for the exact
flow-past-cylinder dataset used in this project. Note that this code is a
wrapper for legacy code that is not intended to be used very often or in a
critical/production setting, so maintainability may be lacking.
"""
import vtktools
import numpy as np
from utils import get_grid_end_points
import os
import sys
if sys.version_info[0] < 3:
import u2r # noqa
else:
import u2rpy3 # noqa
u2r = u2rpy3
__author__ = " Claire Heaney, Zef Wolffs"
__credits__ = ["Jon Atli Tomasson"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Zef Wolffs"
__email__ = "[email protected]"
__status__ = "Development"
def get_clean_vtk_file(filename):
"""
Removes fields and arrays from a vtk file,
leaving the coordinates/connectivity information.
"""
vtu_data = vtktools.vtu(filename)
clean_vtu = vtktools.vtu()
clean_vtu.ugrid.DeepCopy(vtu_data.ugrid)
fieldNames = clean_vtu.GetFieldNames()
# remove all fields and arrays from this vtu
for field in fieldNames:
clean_vtu.RemoveField(field)
fieldNames = clean_vtu.GetFieldNames()
vtkdata = clean_vtu.ugrid.GetCellData()
arrayNames = [
vtkdata.GetArrayName(i) for i in range(vtkdata.GetNumberOfArrays())
]
for array in arrayNames:
vtkdata.RemoveArray(array)
return clean_vtu
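# Example (illustrative; the file name is an assumption):
# clean = get_clean_vtk_file("fpc_0.vtu")
# clean.GetFieldNames() # -> [] once all fields/arrays are stripped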
def create_vtu_file(
path, nNodes, value_mesh_twice_interp, filename, orig_vel, iTime, nDim=2
):
velocity_field = np.zeros((nNodes, 3))
velocity_field[:, 0:nDim] = np.transpose(
value_mesh_twice_interp[0:nDim, :]
)
# streamwise component only
difference = np.zeros((nNodes, 3))
difference[:, 0:nDim] = (
np.transpose(value_mesh_twice_interp[0:nDim, :]) - orig_vel
)
# streamwise component only
difference = difference / np.max(velocity_field)
clean_vtk = get_clean_vtk_file(filename)
new_vtu = vtktools.vtu()
new_vtu.ugrid.DeepCopy(clean_vtk.ugrid)
new_vtu.filename = path + "recon_" + str(iTime) + ".vtu"
new_vtu.AddField("Velocity", velocity_field)
new_vtu.AddField("Original", orig_vel)
new_vtu.AddField("Velocity_diff", difference)
new_vtu.Write()
return
def reconstruct(
snapshot_data_location="./../../data/FPC_Re3900_2D_CG_new/",
snapshot_file_base="fpc_",
reconstructed_file="reconstruction_test.npy", # POD coefficients
nGrids=4,
xlength=2.2,
ylength=0.41,
nTime=300,
field_names=["Velocity"],
offset=0
):
"""
Requires data in format (ngrids, nscalar, nx, ny, ntime)
Args:
snapshot_data_location (str, optional): location of sample vtu file.
Defaults to
`./../../data/FPC_Re3900_2D_CG_new/`.
snapshot_file_base (str, optional): file base of sample vtu file.
Defaults to `fpc_`.
reconstructed_file (str, optional): reconstruction data file. Defaults
to `reconstruction_test.npy`.
xlength (float, optional): length in x direction. Defaults to 2.2.
ylength (float, optional): length in y direction. Defaults to 0.41.
nTime (int, optional): number of timesteps. Defaults to 300.
field_names (list, optional): names of fields in vtu file. Defaults to
["Velocity"].
offset (int, optional): starting timestep. Defaults to 0.
"""
nFields = len(field_names)
# get a vtu file (any will do as the mesh is not adapted)
filename = snapshot_data_location + snapshot_file_base + "0.vtu"
representative_vtu = vtktools.vtu(filename)
coordinates = representative_vtu.GetLocations()
nNodes = coordinates.shape[0] # vtu_data.ugrid.GetNumberOfPoints()
nEl = representative_vtu.ugrid.GetNumberOfCells()
nScalar = 2 # dimension of fields
nDim = 2 # dimension of problem (no need to interpolate in dim no 3)
nloc = 3 # number of local nodes, ie three nodes per element (in 2D)
# get global node numbers
x_ndgln = np.zeros((nEl * nloc), dtype=int)
for iEl in range(nEl):
n = representative_vtu.GetCellPoints(iEl) + 1
x_ndgln[iEl * nloc: (iEl + 1) * nloc] = n
# set grid size
if nGrids == 4:
nx = 55
ny = 42
nz = 1 # nz = 1 for 2D problems
elif nGrids == 1:
nx = 221
ny = 42
nz = 1 # nz = 1 for 2D problems
else:
print("nx, ny, nz not known for ", nGrids, "grids")
x_all = np.transpose(coordinates[:, 0:nDim])
ddx = np.array((xlength / (nGrids * (nx - 1)), ylength / (ny - 1)))
grid_origin = [0.0, 0.0]
grid_width = [xlength / nGrids, 0.0]
# -------------------------------------------------------------------------------------------------
# find node duplications when superposing results
my_field = representative_vtu.GetField(field_names[0])[:, 0]
my_field = 1 # constant dummy field, used only to count grid membership below
nScalar_test = 1
# for one timestep
# for one field
value_mesh = np.zeros((nScalar_test, nNodes, 1)) # nTime=1
value_mesh[:, :, 0] = np.transpose(my_field)
superposed_grids = np.zeros((nNodes))
for iGrid in range(nGrids):
block_x_start = get_grid_end_points(grid_origin, grid_width, iGrid)
zeros_on_mesh = 0
value_grid = u2r.simple_interpolate_from_mesh_to_grid(
value_mesh,
x_all,
x_ndgln,
ddx,
block_x_start,
nx,
ny,
nz,
zeros_on_mesh,
nEl,
nloc,
nNodes,
nScalar_test,
nDim,
1,
)
zeros_on_grid = 1
value_back_on_mesh = u2r.interpolate_from_grid_to_mesh(
value_grid,
block_x_start,
ddx,
x_all,
zeros_on_grid,
nScalar_test,
nx,
ny,
nz,
nNodes,
nDim,
1,
)
superposed_grids = superposed_grids + np.rint(
np.squeeze(value_back_on_mesh)
)
superposed_grids = np.array(superposed_grids, dtype="int")
duplicated_nodal_values = []
for iNode in range(nNodes):
if superposed_grids[iNode] == 0:
# this is bad news - the node hasn't appeared in any grid
print("zero:", iNode)
elif superposed_grids[iNode] == 2:
print("two:", iNode)
# the node appears in two grids - deal with this later
duplicated_nodal_values.append(iNode)
elif superposed_grids[iNode] != 1:
# most of the nodes will appear in one grid
print("unknown:", iNode, superposed_grids[iNode])
reconstruction_on_mesh = np.zeros((nScalar * nTime, nNodes))
reconstructed = np.load(reconstructed_file)
for iGrid in range(nGrids):
reconstruction_grid = reconstructed[iGrid, :, :, :, :]
# reconstruction_grid here has the shape of (nScalar, nx, ny, nTime)
block_x_start = get_grid_end_points(grid_origin, grid_width, iGrid)
for iTime in range(nTime):
zeros_beyond_grid = 1 # 0 extrapolate solution; 1 gives zeros
reconstruction_on_mesh_from_one_grid = (
u2r.interpolate_from_grid_to_mesh(
reconstruction_grid[:, :, :, iTime],
block_x_start,
ddx,
x_all,
zeros_beyond_grid,
nScalar,
nx,
ny,
nz,
nNodes,
nDim,
1,
)
)
reconstruction_on_mesh[
nScalar * iTime: nScalar * (iTime + 1), :
] = reconstruction_on_mesh[
nScalar * iTime: nScalar * (iTime + 1), :
] + np.squeeze(
reconstruction_on_mesh_from_one_grid
)
reconstruction_on_mesh[:, duplicated_nodal_values] = (
0.5 * reconstruction_on_mesh[:, duplicated_nodal_values]
)
# for ifield in range(nFields):
# nDoF = nNodes # could be different value per field
# original_data.append(np.zeros((nNodes, nDim*nTime)))
original = np.zeros((nNodes, nDim * nTime))
for iTime in range(nTime):
filename = (
snapshot_data_location
+ snapshot_file_base
+ str(offset + iTime)
+ ".vtu"
)
vtu_data = vtktools.vtu(filename)
for iField in range(nFields):
my_field = vtu_data.GetField(field_names[iField])[:, 0:nDim]
original[:, iTime * nDim: (iTime + 1) * nDim] = my_field
# make directory for results
path_to_reconstructed_results = "reconstructed_results/"
if not os.path.isdir(path_to_reconstructed_results):
os.mkdir(path_to_reconstructed_results)
template_vtu = snapshot_data_location + snapshot_file_base + "0.vtu"
for iTime in range(nTime):
create_vtu_file(
path_to_reconstructed_results,
nNodes,
reconstruction_on_mesh[iTime * nScalar: (iTime + 1) * nScalar, :],
template_vtu,
original[:, iTime * nDim: (iTime + 1) * nDim],
iTime,
)
if __name__ == "__main__":
reconstruct()
|
py | b4124b721973f420b9ffad02c0690c5cfec9918b | from django.conf import settings
from django.test import TestCase
from mock import Mock
from evennia.objects.objects import DefaultObject, DefaultCharacter, DefaultRoom, DefaultExit
from evennia.accounts.accounts import DefaultAccount
from evennia.scripts.scripts import DefaultScript
from evennia.server.serversession import ServerSession
from evennia.server.sessionhandler import SESSIONS
from evennia.utils import create
from evennia.utils.idmapper.models import flush_cache
SESSIONS.data_out = Mock()
SESSIONS.disconnect = Mock()
class EvenniaTest(TestCase):
"""
Base test for Evennia, sets up a basic environment.
"""
account_typeclass = DefaultAccount
object_typeclass = DefaultObject
character_typeclass = DefaultCharacter
exit_typeclass = DefaultExit
room_typeclass = DefaultRoom
script_typeclass = DefaultScript
def setUp(self):
"""
Sets up testing environment
"""
self.account = create.create_account("TestAccount", email="[email protected]", password="testpassword", typeclass=self.account_typeclass)
self.account2 = create.create_account("TestAccount2", email="[email protected]", password="testpassword", typeclass=self.account_typeclass)
self.room1 = create.create_object(self.room_typeclass, key="Room", nohome=True)
self.room1.db.desc = "room_desc"
settings.DEFAULT_HOME = "#%i" % self.room1.id # we must have a default home
# Set up fake prototype module for allowing tests to use named prototypes.
settings.PROTOTYPE_MODULES = "evennia.utils.tests.data.prototypes_example"
self.room2 = create.create_object(self.room_typeclass, key="Room2")
self.exit = create.create_object(self.exit_typeclass, key='out', location=self.room1, destination=self.room2)
self.obj1 = create.create_object(self.object_typeclass, key="Obj", location=self.room1, home=self.room1)
self.obj2 = create.create_object(self.object_typeclass, key="Obj2", location=self.room1, home=self.room1)
self.char1 = create.create_object(self.character_typeclass, key="Char", location=self.room1, home=self.room1)
self.char1.permissions.add("Developer")
self.char2 = create.create_object(self.character_typeclass, key="Char2", location=self.room1, home=self.room1)
self.char1.account = self.account
self.account.db._last_puppet = self.char1
self.char2.account = self.account2
self.account2.db._last_puppet = self.char2
self.script = create.create_script(self.script_typeclass, key="Script")
self.account.permissions.add("Developer")
# set up a fake session
dummysession = ServerSession()
dummysession.init_session("telnet", ("localhost", "testmode"), SESSIONS)
dummysession.sessid = 1
SESSIONS.portal_connect(dummysession.get_sync_data()) # note that this creates a new Session!
session = SESSIONS.session_from_sessid(1) # the real session
SESSIONS.login(session, self.account, testmode=True)
self.session = session
def tearDown(self):
flush_cache()
del SESSIONS[self.session.sessid]
self.account.delete()
self.account2.delete()
super(EvenniaTest, self).tearDown()
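# A minimal sketch (not part of the original module) of a test case built on
# this base class:
#
# class MyCommandTest(EvenniaTest):
# def test_setup(self):
# self.assertEqual(self.char1.location, self.room1)
# self.assertEqual(self.exit.destination, self.room2)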
|
py | b4124c44c61d9c1665abc4398020fce56368376c | """
The Beer Distribution Problem with an Extension for a Competitor Supply Node, for the PuLP Modeller
"""
# Import PuLP modeler functions
from pulp import *
# Creates a list of all the supply nodes
Warehouses = ["A", "B", "C"]
# Creates a dictionary for the number of units of supply for each supply node
supply = {"A": 1000,
"B": 4000,
"C": 100}
# Creates a list of all demand nodes
Bars = ["1", "2", "3", "4", "5"]
# Creates a dictionary for the number of units of demand for each demand node
demand = {"1":500,
"2":900,
"3":1800,
"4":200,
"5":700,}
# Creates a list of costs of each transportation path
costs = [ #Bars
#1 2 3 4 5
[2,4,5,2,1],#A Warehouses
[3,1,3,2,3], #B
[0,0,0,0,0]
]
# The cost data is made into a dictionary
costs = makeDict([Warehouses,Bars],costs,0)
# Creates the 'prob' variable to contain the problem data
prob = LpProblem("Beer Distribution Problem",LpMinimize)
# Creates a list of tuples containing all the possible routes for transport
Routes = [(w,b) for w in Warehouses for b in Bars]
# A dictionary called 'vars' is created to contain the referenced variables (the routes)
vars = LpVariable.dicts("Route",(Warehouses,Bars),0,None,LpInteger)
# The objective function is added to 'prob' first
prob += lpSum([vars[w][b]*costs[w][b] for (w,b) in Routes]), "Sum_of_Transporting_Costs"
# The supply maximum constraints are added to prob for each supply node (warehouse)
for w in Warehouses:
prob += lpSum([vars[w][b] for b in Bars])<=supply[w], "Sum_of_Products_out_of_Warehouse_%s"%w
# The demand minimum constraints are added to prob for each demand node (bar)
for b in Bars:
prob += lpSum([vars[w][b] for w in Warehouses])>=demand[b], "Sum_of_Products_into_Bar%s"%b
# The problem data is written to an .lp file
prob.writeLP("BeerDistributionProblem.lp")
# The problem is solved using PuLP's choice of Solver
prob.solve()
# The status of the solution is printed to the screen
print("Status:", LpStatus[prob.status])
# Each of the variables is printed with its resolved optimum value
for v in prob.variables():
print(v.name, "=", v.varValue)
# The optimised objective function value is printed to the screen
print("Total Cost of Transportation = ", value(prob.objective))
|
py | b4124de3ca1732af9de2e4c84b20b9b61da2d565 | import sys
import os
import time
sys.dont_write_bytecode = True
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
import django
django.setup()
# Import your models for use in your script
from db.models import *
from extract import base_url
from extract.act import get_act_details, get_act_txt
from extract.act_list import get_act_list_single_page
urls = [
"ukpga",
"ukla",
"uksi",
]
urls_max = [
90000,
90000,
90000,
]
start = time.time()
for index, url in enumerate(urls):
print(f"started fetching '{base_url}{url}' ...")
page = 0
count = 0
stored_exception = None
second_chance = False
while True:
page += 1
page_loaded = Page.objects.filter(url=url, num=page).count() > 0
if page_loaded:
continue
acts = []
try:
print(f'fetching page "{base_url}{url}?page={page}"')
acts = get_act_list_single_page(url, page)
except (KeyboardInterrupt, SystemExit):
stored_exception = sys.exc_info()
break
except Exception as e:
print(f'error fetching page "{base_url}{url}?page={page} (error:{e})"')
continue
if len(acts) == 0 and second_chance:
break
second_chance = len(acts) == 0
for index_, act in enumerate(acts):
try:
act = get_act_details(act)
text = get_act_txt(act['files']['.xht'])
except (KeyboardInterrupt, SystemExit):
stored_exception = sys.exc_info()
break
except Exception as e:
print(f'error fetching act {act["url"]} (error:{e})')
continue
if 'skipped' in act.keys():
continue
count += 1
if count == urls_max[index]:
stored_exception = "None"
end = time.time()
print(end - start)
break
Act.objects.create(url=act['url'], title=act['title'], text=text, type=act['type'], year=act['year'],
number=act['number'])
Page.objects.create(url=url, num=page)
print(f'total processed acts :{count}')
if stored_exception:
print("Either user stopped the process or max act count limit reached!")
break
if stored_exception:
print("Either user stopped the process or max act count limit reached!")
break
print(f"finished fetching '{base_url}{url}' ...")
|
py | b4124f2b7e60720a40f7adb3032abd1884608059 | numbers = list(range(3, 31, 3))
for number in numbers:
print(number) |
py | b4124f5127a5428dcfb909465dbcfc5e214f12ef | #!/usr/bin/env python
#
# File: $Id$
#
"""
Simple example to test the code for the TSL2561 luminosity sensor
on a Raspberry Pi or BeagleBone Black
"""
# system imports
#
from datetime import datetime
import time
# 3rd party imports
#
from TSL2561.TSL2561 import TSL2561
#############################################################################
#
def main():
tsl = TSL2561(gain=TSL2561.GAIN_16X)
print "TSL id: {}, {}".format(*tsl.get_id())
while True:
print "{}".format(datetime.now())
broadband, ir = tsl.get_raw_data()
print "Raw values - broadband: {}, IR: {}".format(broadband, ir)
broadband, ir = tsl.get_luminosity()
print "Luminosity (auto-gain): broadband: {}, IR: {}".format(broadband,
ir)
print "Lux: {}".format(tsl.get_lux())
print ""
time.sleep(5)
############################################################################
############################################################################
#
# Here is where it all starts
#
if __name__ == '__main__':
main()
#
############################################################################
############################################################################
|
py | b4124ff79aca521f4c56c4e8fdd1bfc570d0bc3e | import decimal
import json
from datetime import datetime
from typing import Any
import ujson
from geojson import GeoJSONEncoder
from opennem.core.dispatch_type import DispatchType, dispatch_type_string
class OpenNEMJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
return float(o)
if isinstance(o, datetime):
return o.isoformat()
if isinstance(o, DispatchType):
return dispatch_type_string(o)
return super(OpenNEMJSONEncoder, self).default(o)
class OpenNEMGeoJSONEncoder(GeoJSONEncoder, OpenNEMJSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
return float(o)
if isinstance(o, datetime):
return o.isoformat()
if isinstance(o, DispatchType):
return dispatch_type_string(o)
return super(OpenNEMGeoJSONEncoder, self).default(o)
def opennem_deserialize(serialized: str) -> Any:
# try ujson first because it's faster; fall back to the stdlib json module
# (json.loads takes no encoder class, so the fallback is a plain loads)
try:
return ujson.loads(serialized)
except (TypeError, ValueError):
pass
return json.loads(serialized)
def opennem_serialize(obj: Any, indent=None) -> str:
# ujson would be faster but cannot use the custom encoder, so serialize
# with the stdlib json module and OpenNEMGeoJSONEncoder instead.
return json.dumps(obj, cls=OpenNEMGeoJSONEncoder, indent=indent)
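# Example (illustrative): round-trip a record that plain json.dumps would
# reject because of the Decimal and datetime values:
#
# payload = {"power": decimal.Decimal("1.5"), "seen": datetime.now()}
# text = opennem_serialize(payload)
# opennem_deserialize(text) # -> {"power": 1.5, "seen": "<isoformat str>"}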
|
py | b41250f6dac179d01dd0fe6a6622f8e9400dc49a | # -*- coding: utf-8 -*-
# Copyright (c) 2015, New Indictrans Technologies Pvt Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Language')
class TestLanguage(unittest.TestCase):
pass
|
py | b4125158fc1400a8676f8436edda8de280a1c327 | import FWCore.ParameterSet.Config as cms
process = cms.Process("Test")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_3_2_1/RelValSingleMuPt100/GEN-SIM-RECO/MC_31X_V3-v1/0006/DC15F12B-9477-DE11-B1E0-000423D98C20.root',
'/store/relval/CMSSW_3_2_1/RelValSingleMuPt100/GEN-SIM-RECO/MC_31X_V3-v1/0006/40D6FEFD-8F77-DE11-95A7-001D09F27067.root',
'/store/relval/CMSSW_3_2_1/RelValSingleMuPt100/GEN-SIM-RECO/MC_31X_V3-v1/0005/50EE1208-8177-DE11-8B17-001D09F231B0.root'
)
)
process.maxEvents = cms.untracked.PSet(
input=cms.untracked.int32(100)
)
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.load("Configuration.StandardSequences.Reconstruction_cff")
from Configuration.StandardSequences.Reconstruction_cff import *
process.muonAnalyzer = cms.EDAnalyzer("MuonTimingValidator",
TKtracks = cms.untracked.InputTag("generalTracks"),
STAtracks = cms.untracked.InputTag("standAloneMuons"),
Muons = cms.untracked.InputTag("muons"),
nbins = cms.int32(60),
PtresMax = cms.double(2000.0),
CombinedTiming = cms.untracked.InputTag("muontiming","combined"),
DtTiming = cms.untracked.InputTag("muontiming","dt"),
CscTiming = cms.untracked.InputTag("muontiming","csc"),
simPtMin = cms.double(5.0),
PtresMin = cms.double(-1000.0),
PtCut = cms.double(1.0),
etaMax = cms.double(2.4),
etaMin = cms.double(0.0),
PlotScale = cms.double(1.0),
DTcut = cms.int32(8),
CSCcut = cms.int32(4),
open = cms.string('recreate'),
out = cms.string('test_timing.root')
)
process.prefer("GlobalTag")
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '80X_dataRun2_Prompt_v9', '')
process.p = cms.Path(muontiming)
process.mutest = cms.Path(process.muonAnalyzer)
process.schedule = cms.Schedule(process.p,process.mutest)
# process.schedule = cms.Schedule(process.mutest)
|
py | b41251fa560e7b118d6c72316a81c25f2070ea8a | import csv
from dagster import (
Path,
Selector,
SerializationStrategy,
execute_pipeline,
input_hydration_config,
pipeline,
solid,
usable_as_dagster_type,
)
class CsvSerializationStrategy(SerializationStrategy):
def __init__(self):
super(CsvSerializationStrategy, self).__init__(
'csv_strategy', read_mode='r', write_mode='w'
)
def serialize(self, value, write_file_obj):
fieldnames = value[0]
writer = csv.DictWriter(write_file_obj, fieldnames)
writer.writeheader()
writer.writerows(value)
def deserialize(self, read_file_obj):
reader = csv.DictReader(read_file_obj)
return LessSimpleDataFrame([row for row in reader])
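# With the strategy above, an intermediate LessSimpleDataFrame is persisted
# between solids as a CSV file (write_mode='w') and re-hydrated through
# csv.DictReader when the downstream solid reads it back (read_mode='r').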
@input_hydration_config(Selector({'pickle': Path}))
def less_simple_data_frame_input_hydration_config(context, selector):
with open(selector['pickle'], 'r') as fd:
lines = [row for row in csv.DictReader(fd)]
context.log.info('Read {n_lines} lines'.format(n_lines=len(lines)))
return LessSimpleDataFrame(lines)
@usable_as_dagster_type(
name='LessSimpleDataFrame',
description=(
'A naive representation of a data frame, e.g., as returned by '
'csv.DictReader.'
),
serialization_strategy=CsvSerializationStrategy(),
input_hydration_config=less_simple_data_frame_input_hydration_config,
)
class LessSimpleDataFrame(list):
pass
@solid
def read_csv(context, csv_path: str) -> LessSimpleDataFrame:
with open(csv_path, 'r') as fd:
lines = [row for row in csv.DictReader(fd)]
context.log.info('Read {n_lines} lines'.format(n_lines=len(lines)))
return LessSimpleDataFrame(lines)
@solid
def sort_by_calories(context, cereals: LessSimpleDataFrame):
sorted_cereals = sorted(cereals, key=lambda cereal: cereal['calories'])
context.log.info(
'Least caloric cereal: {least_caloric}'.format(
least_caloric=sorted_cereals[0]['name']
)
)
context.log.info(
'Most caloric cereal: {most_caloric}'.format(
most_caloric=sorted_cereals[-1]['name']
)
)
return LessSimpleDataFrame(sorted_cereals)
@pipeline
def serialization_strategy_pipeline():
sort_by_calories(read_csv())
if __name__ == '__main__':
environment_dict = {
'solids': {
'read_csv': {'inputs': {'csv_path': {'value': 'cereal.csv'}}}
},
'storage': {'filesystem': {}},
}
result = execute_pipeline(
serialization_strategy_pipeline, environment_dict=environment_dict
)
assert result.success
|
py | b41252ebe53cab72465455acdd36e3c32f5001b0 | class Solution:
    def getRow(self, rowIndex):
        def n_c_r(n, r):
            # multiplicative formula C(n, r) = prod_{i=1..r} (n - i + 1) / i;
            # each partial product is divisible by i, so floor division is exact
            numerator = 1
            for i in range(1, r + 1):
                numerator *= n - (i - 1)
                numerator //= i
            return numerator
        return [n_c_r(rowIndex, i) for i in range(rowIndex + 1)]
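# Worked example: Solution().getRow(3) evaluates C(3, i) for i = 0..3 and
# returns the Pascal's triangle row [1, 3, 3, 1].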
|
py | b41253efcb024607c701d4c72c220d433b0ba2be | import string
class Template:
template_path = None
template_context = None
def __init__(self, config):
self.config = config
with open(self.template_path, 'r') as fil:
self.source = fil.read()
def preprocess(self, template):
return template
def generate(self, request_context):
# FIXME: Optimize
context = self.template_context.copy()
for key, val in context.items():
context[key] = string.Template(val).substitute(request_context)
template = string.Template(self.source).substitute(context)
output = self.preprocess(template)
return output
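# Hedged usage sketch: a subclass is expected to supply template_path and
# template_context; the names below are illustrative, not from the original.
#
#   class GreetingTemplate(Template):
#       template_path = 'greeting.txt'        # file containing "Hello $who!"
#       template_context = {'who': '$user'}
#
#   GreetingTemplate(config={}).generate({'user': 'Ada'})  # -> "Hello Ada!"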
|
py | b41253fc89950dcbd95e2c049a19de409c9fa5dc | # Add your Python code here. E.g.
from microbit import *
"""
A simple maze program. You are the flashing dot and can walk around
using the accelerometer.
"""
import microbit
d = microbit.display
ac = microbit.accelerometer
# the maze data, as binary numbers (outside walls are added automatically)
maze = [
0b0000000000000000,
0b0100010101011110,
0b0100010101010010,
0b0111110100000000,
0b0000000111111110,
0b0111111101000000,
0b0101010001011100,
0b0101000100000100,
0b0100011111111100,
0b0101010001000110,
0b0101000100010010,
0b0101010111010110,
0b0111010101010010,
0b0000010100010010,
0b0111110111111110,
0b0000000000000000,
]
def get_maze(x, y):
if 0 <= x < 16 and 0 <= y < 16:
return (maze[y] >> (15 - x)) & 1
else:
return 1
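# e.g. row 1 of the maze is 0b0100010101011110, so get_maze(1, 1) tests bit
# 15 - 1 = 14 and returns 1 (a wall), while get_maze(0, 1) tests bit 15 and
# returns 0 (open floor); anything outside the 16x16 grid reads as a wall.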
def draw(x, y, tick):
img = microbit.Image(5,5)
for j in range(5):
for i in range(5):
img.set_pixel(i, j, get_maze(x + i - 2, y + j - 2)*5)
# draw the player, flashing
img.set_pixel(2, 2, (tick & 1)*4+5)
d.show(img)
def main():
x = 0
y = 0
tick = 0
while True:
tick += 1
if tick == 4:
# walk around, with collision detection
tick = 0
if ac.get_x() > 200 and get_maze(x + 1, y) == 0:
x += 1
elif ac.get_x() < -200 and get_maze(x - 1, y) == 0:
x -= 1
elif ac.get_y() > 200 and get_maze(x, y + 1) == 0:
y += 1
elif ac.get_y() < -200 and get_maze(x, y - 1) == 0:
y -= 1
x = min(15, max(0, x))
y = min(15, max(0, y))
# draw the maze
draw(x, y, tick)
microbit.sleep(50)
main() |
py | b41254eeabd93a9f31c1c4c1962737ecedd32660 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import time
import unittest
from datetime import datetime
from azure import WindowsAzureError
from azure.storage import (
Entity,
EntityProperty,
StorageServiceProperties,
TableService,
)
from util import (
AzureTestCase,
credentials,
getUniqueName,
set_service_options,
)
#------------------------------------------------------------------------------
MAX_RETRY = 60
#------------------------------------------------------------------------------
class TableServiceTest(AzureTestCase):
def setUp(self):
self.ts = TableService(credentials.getStorageServicesName(),
credentials.getStorageServicesKey())
set_service_options(self.ts)
self.table_name = getUniqueName('uttable')
self.additional_table_names = []
def tearDown(self):
self.cleanup()
return super(TableServiceTest, self).tearDown()
def cleanup(self):
try:
self.ts.delete_table(self.table_name)
except:
pass
for name in self.additional_table_names:
try:
self.ts.delete_table(name)
except:
pass
#--Helpers-----------------------------------------------------------------
def _create_table(self, table_name):
'''
Creates a table with the specified name.
'''
self.ts.create_table(table_name, True)
def _create_table_with_default_entities(self, table_name, entity_count):
'''
Creates a table with the specified name and adds entities with the
default set of values. PartitionKey is set to 'MyPartition' and RowKey
is set to a unique counter value starting at 1 (as a string).
'''
entities = []
self._create_table(table_name)
for i in range(1, entity_count + 1):
entities.append(self.ts.insert_entity(
table_name,
self._create_default_entity_dict('MyPartition', str(i))))
return entities
def _create_default_entity_class(self, partition, row):
'''
Creates a class-based entity with fixed values, using all
of the supported data types.
'''
# TODO: Edm.Binary and null
entity = Entity()
entity.PartitionKey = partition
entity.RowKey = row
entity.age = 39
entity.sex = 'male'
entity.married = True
entity.deceased = False
entity.optional = None
entity.ratio = 3.1
entity.large = 9333111000
entity.Birthday = datetime(1973, 10, 4)
entity.birthday = datetime(1970, 10, 4)
entity.binary = None
entity.other = EntityProperty('Edm.Int64', 20)
entity.clsid = EntityProperty(
'Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833')
return entity
def _create_default_entity_dict(self, partition, row):
'''
Creates a dictionary-based entity with fixed values, using all
of the supported data types.
'''
# TODO: Edm.Binary and null
return {'PartitionKey': partition,
'RowKey': row,
'age': 39,
'sex': 'male',
'married': True,
'deceased': False,
'optional': None,
'ratio': 3.1,
'large': 9333111000,
'Birthday': datetime(1973, 10, 4),
'birthday': datetime(1970, 10, 4),
'binary': EntityProperty('Edm.Binary', None),
'other': EntityProperty('Edm.Int64', 20),
'clsid': EntityProperty(
'Edm.Guid',
'c9da6455-213d-42c9-9a79-3e9149a57833')}
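    # EntityProperty pins an explicit EDM type on a value; the untyped values
    # above are left to the SDK's default type mapping (an assumption about
    # why 'other' and 'clsid' are wrapped while e.g. 'age' is not).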
def _create_updated_entity_dict(self, partition, row):
'''
Creates a dictionary-based entity with fixed values, with a
different set of values than the default entity. It
adds fields, changes field values, changes field types,
and removes fields when compared to the default entity.
'''
return {'PartitionKey': partition,
'RowKey': row,
'age': 'abc',
'sex': 'female',
'sign': 'aquarius',
'birthday': datetime(1991, 10, 4)}
def _assert_default_entity(self, entity):
'''
Asserts that the entity passed in matches the default entity.
'''
self.assertEqual(entity.age, 39)
self.assertEqual(entity.sex, 'male')
self.assertEqual(entity.married, True)
self.assertEqual(entity.deceased, False)
self.assertFalse(hasattr(entity, "aquarius"))
self.assertEqual(entity.ratio, 3.1)
self.assertEqual(entity.large, 9333111000)
self.assertEqual(entity.Birthday, datetime(1973, 10, 4))
self.assertEqual(entity.birthday, datetime(1970, 10, 4))
self.assertEqual(entity.other, 20)
self.assertIsInstance(entity.clsid, EntityProperty)
self.assertEqual(entity.clsid.type, 'Edm.Guid')
self.assertEqual(entity.clsid.value,
'c9da6455-213d-42c9-9a79-3e9149a57833')
def _assert_updated_entity(self, entity):
'''
Asserts that the entity passed in matches the updated entity.
'''
self.assertEqual(entity.age, 'abc')
self.assertEqual(entity.sex, 'female')
self.assertFalse(hasattr(entity, "married"))
self.assertFalse(hasattr(entity, "deceased"))
self.assertEqual(entity.sign, 'aquarius')
self.assertFalse(hasattr(entity, "optional"))
self.assertFalse(hasattr(entity, "ratio"))
self.assertFalse(hasattr(entity, "large"))
self.assertFalse(hasattr(entity, "Birthday"))
self.assertEqual(entity.birthday, datetime(1991, 10, 4))
self.assertFalse(hasattr(entity, "other"))
self.assertFalse(hasattr(entity, "clsid"))
def _assert_merged_entity(self, entity):
'''
Asserts that the entity passed in matches the default entity
merged with the updated entity.
'''
self.assertEqual(entity.age, 'abc')
self.assertEqual(entity.sex, 'female')
self.assertEqual(entity.sign, 'aquarius')
self.assertEqual(entity.married, True)
self.assertEqual(entity.deceased, False)
self.assertEqual(entity.ratio, 3.1)
self.assertEqual(entity.large, 9333111000)
self.assertEqual(entity.Birthday, datetime(1973, 10, 4))
self.assertEqual(entity.birthday, datetime(1991, 10, 4))
self.assertEqual(entity.other, 20)
self.assertIsInstance(entity.clsid, EntityProperty)
self.assertEqual(entity.clsid.type, 'Edm.Guid')
self.assertEqual(entity.clsid.value,
'c9da6455-213d-42c9-9a79-3e9149a57833')
#--Test cases for table service -------------------------------------------
def test_get_set_table_service_properties(self):
table_properties = self.ts.get_table_service_properties()
self.ts.set_table_service_properties(table_properties)
tests = [('logging.delete', True),
('logging.delete', False),
('logging.read', True),
('logging.read', False),
('logging.write', True),
('logging.write', False),
]
for path, value in tests:
# print path
cur = table_properties
for component in path.split('.')[:-1]:
cur = getattr(cur, component)
last_attr = path.split('.')[-1]
setattr(cur, last_attr, value)
self.ts.set_table_service_properties(table_properties)
retry_count = 0
while retry_count < MAX_RETRY:
table_properties = self.ts.get_table_service_properties()
cur = table_properties
for component in path.split('.'):
cur = getattr(cur, component)
if value == cur:
break
time.sleep(1)
retry_count += 1
self.assertEqual(value, cur)
def test_table_service_retention_single_set(self):
table_properties = self.ts.get_table_service_properties()
table_properties.logging.retention_policy.enabled = False
table_properties.logging.retention_policy.days = 5
# TODO: Better error, ValueError?
self.assertRaises(WindowsAzureError,
self.ts.set_table_service_properties,
table_properties)
table_properties = self.ts.get_table_service_properties()
table_properties.logging.retention_policy.days = None
table_properties.logging.retention_policy.enabled = True
# TODO: Better error, ValueError?
self.assertRaises(WindowsAzureError,
self.ts.set_table_service_properties,
table_properties)
def test_table_service_set_both(self):
table_properties = self.ts.get_table_service_properties()
table_properties.logging.retention_policy.enabled = True
table_properties.logging.retention_policy.days = 5
self.ts.set_table_service_properties(table_properties)
table_properties = self.ts.get_table_service_properties()
self.assertEqual(
True, table_properties.logging.retention_policy.enabled)
self.assertEqual(5, table_properties.logging.retention_policy.days)
#--Test cases for tables --------------------------------------------------
def test_create_table(self):
# Arrange
# Act
created = self.ts.create_table(self.table_name)
# Assert
self.assertTrue(created)
def test_create_table_fail_on_exist(self):
# Arrange
# Act
created = self.ts.create_table(self.table_name, True)
# Assert
self.assertTrue(created)
def test_create_table_with_already_existing_table(self):
# Arrange
# Act
created1 = self.ts.create_table(self.table_name)
created2 = self.ts.create_table(self.table_name)
# Assert
self.assertTrue(created1)
self.assertFalse(created2)
def test_create_table_with_already_existing_table_fail_on_exist(self):
# Arrange
# Act
created = self.ts.create_table(self.table_name)
with self.assertRaises(WindowsAzureError):
self.ts.create_table(self.table_name, True)
# Assert
self.assertTrue(created)
def test_query_tables(self):
# Arrange
self._create_table(self.table_name)
# Act
tables = self.ts.query_tables()
for table in tables:
pass
# Assert
tableNames = [x.name for x in tables]
self.assertGreaterEqual(len(tableNames), 1)
self.assertGreaterEqual(len(tables), 1)
self.assertIn(self.table_name, tableNames)
def test_query_tables_with_table_name(self):
# Arrange
self._create_table(self.table_name)
# Act
tables = self.ts.query_tables(self.table_name)
for table in tables:
pass
# Assert
self.assertEqual(len(tables), 1)
self.assertEqual(tables[0].name, self.table_name)
def test_query_tables_with_table_name_no_tables(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.ts.query_tables(self.table_name)
# Assert
def test_query_tables_with_top(self):
# Arrange
self.additional_table_names = [
self.table_name + suffix for suffix in 'abcd']
for name in self.additional_table_names:
self.ts.create_table(name)
# Act
tables = self.ts.query_tables(None, 3)
for table in tables:
pass
# Assert
self.assertEqual(len(tables), 3)
def test_query_tables_with_top_and_next_table_name(self):
# Arrange
self.additional_table_names = [
self.table_name + suffix for suffix in 'abcd']
for name in self.additional_table_names:
self.ts.create_table(name)
# Act
tables_set1 = self.ts.query_tables(None, 3)
tables_set2 = self.ts.query_tables(
None, 3, tables_set1.x_ms_continuation['NextTableName'])
# Assert
self.assertEqual(len(tables_set1), 3)
self.assertGreaterEqual(len(tables_set2), 1)
self.assertLessEqual(len(tables_set2), 3)
def test_delete_table_with_existing_table(self):
# Arrange
self._create_table(self.table_name)
# Act
deleted = self.ts.delete_table(self.table_name)
# Assert
self.assertTrue(deleted)
tables = self.ts.query_tables()
self.assertNamedItemNotInContainer(tables, self.table_name)
def test_delete_table_with_existing_table_fail_not_exist(self):
# Arrange
self._create_table(self.table_name)
# Act
deleted = self.ts.delete_table(self.table_name, True)
# Assert
self.assertTrue(deleted)
tables = self.ts.query_tables()
self.assertNamedItemNotInContainer(tables, self.table_name)
def test_delete_table_with_non_existing_table(self):
# Arrange
# Act
deleted = self.ts.delete_table(self.table_name)
# Assert
self.assertFalse(deleted)
def test_delete_table_with_non_existing_table_fail_not_exist(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.ts.delete_table(self.table_name, True)
# Assert
#--Test cases for entities ------------------------------------------
def test_insert_entity_dictionary(self):
# Arrange
self._create_table(self.table_name)
# Act
dict = self._create_default_entity_dict('MyPartition', '1')
resp = self.ts.insert_entity(self.table_name, dict)
# Assert
self.assertIsNotNone(resp)
def test_insert_entity_class_instance(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = self._create_default_entity_class('MyPartition', '1')
resp = self.ts.insert_entity(self.table_name, entity)
# Assert
self.assertIsNotNone(resp)
def test_insert_entity_conflict(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.insert_entity(
self.table_name,
self._create_default_entity_dict('MyPartition', '1'))
# Assert
def test_get_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
resp = self.ts.get_entity(self.table_name, 'MyPartition', '1')
# Assert
self.assertEqual(resp.PartitionKey, 'MyPartition')
self.assertEqual(resp.RowKey, '1')
self._assert_default_entity(resp)
def test_get_entity_not_existing(self):
# Arrange
self._create_table(self.table_name)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.get_entity(self.table_name, 'MyPartition', '1')
# Assert
def test_get_entity_with_select(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
resp = self.ts.get_entity(
self.table_name, 'MyPartition', '1', 'age,sex')
# Assert
self.assertEqual(resp.age, 39)
self.assertEqual(resp.sex, 'male')
self.assertFalse(hasattr(resp, "birthday"))
self.assertFalse(hasattr(resp, "married"))
self.assertFalse(hasattr(resp, "deceased"))
def test_query_entities(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 2)
# Act
resp = self.ts.query_entities(self.table_name)
# Assert
self.assertEqual(len(resp), 2)
for entity in resp:
self.assertEqual(entity.PartitionKey, 'MyPartition')
self._assert_default_entity(entity)
self.assertEqual(resp[0].RowKey, '1')
self.assertEqual(resp[1].RowKey, '2')
def test_query_entities_with_filter(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 2)
self.ts.insert_entity(
self.table_name,
self._create_default_entity_dict('MyOtherPartition', '3'))
# Act
resp = self.ts.query_entities(
self.table_name, "PartitionKey eq 'MyPartition'")
# Assert
self.assertEqual(len(resp), 2)
for entity in resp:
self.assertEqual(entity.PartitionKey, 'MyPartition')
self._assert_default_entity(entity)
def test_query_entities_with_select(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 2)
# Act
resp = self.ts.query_entities(self.table_name, None, 'age,sex')
# Assert
self.assertEqual(len(resp), 2)
self.assertEqual(resp[0].age, 39)
self.assertEqual(resp[0].sex, 'male')
self.assertFalse(hasattr(resp[0], "birthday"))
self.assertFalse(hasattr(resp[0], "married"))
self.assertFalse(hasattr(resp[0], "deceased"))
def test_query_entities_with_top(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 3)
# Act
resp = self.ts.query_entities(self.table_name, None, None, 2)
# Assert
self.assertEqual(len(resp), 2)
def test_query_entities_with_top_and_next(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 5)
# Act
resp1 = self.ts.query_entities(self.table_name, None, None, 2)
resp2 = self.ts.query_entities(
self.table_name, None, None, 2,
resp1.x_ms_continuation['NextPartitionKey'],
resp1.x_ms_continuation['NextRowKey'])
resp3 = self.ts.query_entities(
self.table_name, None, None, 2,
resp2.x_ms_continuation['NextPartitionKey'],
resp2.x_ms_continuation['NextRowKey'])
# Assert
self.assertEqual(len(resp1), 2)
self.assertEqual(len(resp2), 2)
self.assertEqual(len(resp3), 1)
self.assertEqual(resp1[0].RowKey, '1')
self.assertEqual(resp1[1].RowKey, '2')
self.assertEqual(resp2[0].RowKey, '3')
self.assertEqual(resp2[1].RowKey, '4')
self.assertEqual(resp3[0].RowKey, '5')
def test_update_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.update_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_update_entity_with_if_matches(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.update_entity(
self.table_name,
'MyPartition', '1', sent_entity, if_match=entities[0].etag)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_update_entity_with_if_doesnt_match(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
with self.assertRaises(WindowsAzureError):
self.ts.update_entity(
self.table_name, 'MyPartition', '1', sent_entity,
if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
# Assert
def test_insert_or_merge_entity_with_existing_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.insert_or_merge_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_merged_entity(received_entity)
def test_insert_or_merge_entity_with_non_existing_entity(self):
# Arrange
self._create_table(self.table_name)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.insert_or_merge_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_insert_or_replace_entity_with_existing_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.insert_or_replace_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_insert_or_replace_entity_with_non_existing_entity(self):
# Arrange
self._create_table(self.table_name)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.insert_or_replace_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_merge_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.merge_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_merged_entity(received_entity)
def test_merge_entity_not_existing(self):
# Arrange
self._create_table(self.table_name)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
with self.assertRaises(WindowsAzureError):
self.ts.merge_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
def test_merge_entity_with_if_matches(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.merge_entity(
self.table_name, 'MyPartition', '1',
sent_entity, if_match=entities[0].etag)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_merged_entity(received_entity)
def test_merge_entity_with_if_doesnt_match(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
with self.assertRaises(WindowsAzureError):
self.ts.merge_entity(
self.table_name, 'MyPartition', '1', sent_entity,
if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
# Assert
def test_delete_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
resp = self.ts.delete_entity(self.table_name, 'MyPartition', '1')
# Assert
self.assertIsNone(resp)
with self.assertRaises(WindowsAzureError):
self.ts.get_entity(self.table_name, 'MyPartition', '1')
def test_delete_entity_not_existing(self):
# Arrange
self._create_table(self.table_name)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.delete_entity(self.table_name, 'MyPartition', '1')
# Assert
def test_delete_entity_with_if_matches(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
resp = self.ts.delete_entity(
self.table_name, 'MyPartition', '1', if_match=entities[0].etag)
# Assert
self.assertIsNone(resp)
with self.assertRaises(WindowsAzureError):
self.ts.get_entity(self.table_name, 'MyPartition', '1')
def test_delete_entity_with_if_doesnt_match(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.delete_entity(
self.table_name, 'MyPartition', '1',
if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
# Assert
#--Test cases for batch ---------------------------------------------
def test_with_filter_single(self):
called = []
def my_filter(request, next):
called.append(True)
return next(request)
tc = self.ts.with_filter(my_filter)
tc.create_table(self.table_name)
self.assertTrue(called)
del called[:]
tc.delete_table(self.table_name)
self.assertTrue(called)
del called[:]
def test_with_filter_chained(self):
called = []
def filter_a(request, next):
called.append('a')
return next(request)
def filter_b(request, next):
called.append('b')
return next(request)
tc = self.ts.with_filter(filter_a).with_filter(filter_b)
tc.create_table(self.table_name)
self.assertEqual(called, ['b', 'a'])
tc.delete_table(self.table_name)
def test_batch_insert(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.begin_batch()
self.ts.insert_entity(self.table_name, entity)
self.ts.commit_batch()
# Assert
result = self.ts.get_entity(self.table_name, '001', 'batch_insert')
self.assertIsNotNone(result)
def test_batch_update(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_update'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity = self.ts.get_entity(self.table_name, '001', 'batch_update')
self.assertEqual(3, entity.test3)
entity.test2 = 'value1'
self.ts.begin_batch()
self.ts.update_entity(self.table_name, '001', 'batch_update', entity)
self.ts.commit_batch()
entity = self.ts.get_entity(self.table_name, '001', 'batch_update')
# Assert
self.assertEqual('value1', entity.test2)
def test_batch_merge(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_merge'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity = self.ts.get_entity(self.table_name, '001', 'batch_merge')
self.assertEqual(3, entity.test3)
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_merge'
entity.test2 = 'value1'
self.ts.begin_batch()
self.ts.merge_entity(self.table_name, '001', 'batch_merge', entity)
self.ts.commit_batch()
entity = self.ts.get_entity(self.table_name, '001', 'batch_merge')
# Assert
self.assertEqual('value1', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_insert_replace(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_replace'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.begin_batch()
self.ts.insert_or_replace_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
self.ts.commit_batch()
entity = self.ts.get_entity(
self.table_name, '001', 'batch_insert_replace')
# Assert
self.assertIsNotNone(entity)
self.assertEqual('value', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_insert_merge(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_merge'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.begin_batch()
self.ts.insert_or_merge_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
self.ts.commit_batch()
entity = self.ts.get_entity(
self.table_name, '001', 'batch_insert_merge')
# Assert
self.assertIsNotNone(entity)
self.assertEqual('value', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_delete(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_delete'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity = self.ts.get_entity(self.table_name, '001', 'batch_delete')
#self.assertEqual(3, entity.test3)
self.ts.begin_batch()
self.ts.delete_entity(self.table_name, '001', 'batch_delete')
self.ts.commit_batch()
def test_batch_inserts(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = 'batch_inserts'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
self.ts.begin_batch()
for i in range(100):
entity.RowKey = str(i)
self.ts.insert_entity(self.table_name, entity)
self.ts.commit_batch()
entities = self.ts.query_entities(
self.table_name, "PartitionKey eq 'batch_inserts'", '')
# Assert
self.assertIsNotNone(entities)
self.assertEqual(100, len(entities))
def test_batch_all_operations_together(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '003'
entity.RowKey = 'batch_all_operations_together-1'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-2'
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-3'
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-4'
self.ts.insert_entity(self.table_name, entity)
self.ts.begin_batch()
entity.RowKey = 'batch_all_operations_together'
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-1'
self.ts.delete_entity(
self.table_name, entity.PartitionKey, entity.RowKey)
entity.RowKey = 'batch_all_operations_together-2'
entity.test3 = 10
self.ts.update_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-3'
entity.test3 = 100
self.ts.merge_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-4'
entity.test3 = 10
self.ts.insert_or_replace_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-5'
self.ts.insert_or_merge_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
self.ts.commit_batch()
# Assert
entities = self.ts.query_entities(
self.table_name, "PartitionKey eq '003'", '')
self.assertEqual(5, len(entities))
def test_batch_same_row_operations_fail(self):
# Arrange
self._create_table(self.table_name)
entity = self._create_default_entity_dict('001', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.begin_batch()
entity = self._create_updated_entity_dict(
'001', 'batch_negative_1')
self.ts.update_entity(
self.table_name,
entity['PartitionKey'],
entity['RowKey'], entity)
entity = self._create_default_entity_dict(
'001', 'batch_negative_1')
self.ts.merge_entity(
self.table_name,
entity['PartitionKey'],
entity['RowKey'], entity)
self.ts.cancel_batch()
# Assert
def test_batch_different_partition_operations_fail(self):
# Arrange
self._create_table(self.table_name)
entity = self._create_default_entity_dict('001', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.begin_batch()
entity = self._create_updated_entity_dict(
'001', 'batch_negative_1')
self.ts.update_entity(
self.table_name, entity['PartitionKey'], entity['RowKey'],
entity)
entity = self._create_default_entity_dict(
'002', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
self.ts.cancel_batch()
# Assert
def test_batch_different_table_operations_fail(self):
# Arrange
other_table_name = self.table_name + 'other'
self.additional_table_names = [other_table_name]
self._create_table(self.table_name)
self._create_table(other_table_name)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.begin_batch()
entity = self._create_default_entity_dict(
'001', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
entity = self._create_default_entity_dict(
'001', 'batch_negative_2')
self.ts.insert_entity(other_table_name, entity)
self.ts.cancel_batch()
def test_unicode_property_value(self):
''' regression test for github issue #57'''
# Act
self._create_table(self.table_name)
self.ts.insert_entity(
self.table_name,
{'PartitionKey': 'test', 'RowKey': 'test1', 'Description': u'ꀕ'})
self.ts.insert_entity(
self.table_name,
{'PartitionKey': 'test', 'RowKey': 'test2', 'Description': 'ꀕ'})
resp = self.ts.query_entities(
self.table_name, "PartitionKey eq 'test'")
# Assert
self.assertEqual(len(resp), 2)
self.assertEqual(resp[0].Description, u'ꀕ')
self.assertEqual(resp[1].Description, u'ꀕ')
def test_unicode_property_name(self):
# Act
self._create_table(self.table_name)
self.ts.insert_entity(
self.table_name,
{'PartitionKey': 'test', 'RowKey': 'test1', u'啊齄丂狛狜': u'ꀕ'})
self.ts.insert_entity(
self.table_name,
{'PartitionKey': 'test', 'RowKey': 'test2', u'啊齄丂狛狜': 'hello'})
resp = self.ts.query_entities(
self.table_name, "PartitionKey eq 'test'")
# Assert
self.assertEqual(len(resp), 2)
self.assertEqual(resp[0].__dict__[u'啊齄丂狛狜'], u'ꀕ')
self.assertEqual(resp[1].__dict__[u'啊齄丂狛狜'], u'hello')
def test_unicode_create_table_unicode_name(self):
# Arrange
self.table_name = self.table_name + u'啊齄丂狛狜'
# Act
with self.assertRaises(WindowsAzureError):
# not supported - table name must be alphanumeric, lowercase
self.ts.create_table(self.table_name)
# Assert
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
py | b412553d0600da2350fe0883ed60a8bded663e4c | #!/usr/bin/env python
"""Move integration details from the Configuration file into the
database as ExternalIntegrations
"""
import os
import sys
import json
import logging
import uuid
from nose.tools import set_trace
bin_dir = os.path.split(__file__)[0]
package_dir = os.path.join(bin_dir, "..")
sys.path.append(os.path.abspath(package_dir))
from config import Configuration
from core.model import (
ConfigurationSetting,
ExternalIntegration as EI,
Library,
create,
production_session,
)
log = logging.getLogger(name="Content Server configuration import")
def log_import(integration_or_setting):
log.info("CREATED: %r" % integration_or_setting)
_db = production_session()
try:
Configuration.load()
library = Library.default(_db)
if not library:
library, ignore = create(
_db, Library, name=u'default', short_name=u'default',
uuid=unicode(uuid.uuid4())
)
library.is_default = True
# Create the Bibblio integration.
bibblio_conf = Configuration.integration('Bibblio')
if bibblio_conf:
bibblio = EI(
name=EI.BIBBLIO,
protocol=EI.BIBBLIO,
goal=EI.METADATA_GOAL
)
_db.add(bibblio)
bibblio.username = bibblio_conf.get('client_id')
bibblio.password = bibblio_conf.get('client_secret')
log_import(bibblio)
# Create the Metadata Wrangler configuration.
metadata_wrangler_conf = Configuration.integration('Metadata Wrangler')
if metadata_wrangler_conf:
wrangler = EI(
name=EI.METADATA_WRANGLER,
protocol=EI.METADATA_WRANGLER,
goal=EI.METADATA_GOAL
)
_db.add(wrangler)
wrangler.url = metadata_wrangler_conf.get('url')
wrangler.username = metadata_wrangler_conf.get('client_id')
wrangler.password = metadata_wrangler_conf.get('client_secret')
log_import(wrangler)
# Get the base url.
content_server_conf = Configuration.integration('Content Server')
if content_server_conf:
url = content_server_conf.get('url')
setting = ConfigurationSetting.sitewide(_db, Configuration.BASE_URL_KEY)
setting.value = url
log_import(setting)
# Copy facet configuration to the library.
facet_policy = Configuration.policy("facets", default={})
default_enabled = Configuration.DEFAULT_ENABLED_FACETS
enabled = facet_policy.get("enabled", default_enabled)
for k, v in enabled.items():
library.enabled_facets_setting(unicode(k)).value = unicode(json.dumps(v))
default_facets = Configuration.DEFAULT_FACET
default = facet_policy.get("default", default_facets)
for k, v in default.items():
library.default_facet_setting(unicode(k)).value = unicode(v)
log.info('Default facets imported')
except Exception as e:
# Catch any error and roll back the database so the full
# migration can be run again without raising integrity exceptions
# for duplicate integrations.
_db.rollback()
raise e
finally:
_db.commit()
_db.close()
|
py | b412562fc70065e0c0e41ec87441f548e2432663 | a = []
b = 0  # running total collected so far
c = 0  # number of items taken
n = int(input())  # how many items are available
m = int(input())  # amount that must be reached
for i in range(n):
    a.append(int(input()))
a.sort()
# greedy: take the largest remaining item until the total reaches m
while b < m:
    b += a.pop()
    c += 1
print(c) |
py | b4125649957710ed0c96b0a8b0058c372d95ae9b | import sys
import os
import re
# Disabled on OpenBSD due to fail #XXXX.
import platform
if platform.system() == "OpenBSD":
self.skip = 1
# mask BFD warnings: https://bugs.launchpad.net/tarantool/+bug/1018356
sys.stdout.push_filter("unable to read unknown load command 0x2\d+", "")
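# (push_filter(pattern, replacement) rewrites matching chunks of server
# output before it is compared against the reference result file; the
# clear_all_filters() call at the end of this script removes them again.)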
server.test_option("--help")
server.test_option("-h")
# Replace with the same value for case when builddir inside source dir
sys.stdout.push_filter(re.escape(os.getenv("BUILDDIR")+"/src/tarantool"), "tarantool")
sys.stdout.push_filter(re.escape(os.getenv("BUILDDIR")), "${SOURCEDIR}")
sys.stdout.push_filter(re.escape(os.getenv("SOURCEDIR")+"/src/tarantool"), "tarantool")
sys.stdout.push_filter(re.escape(os.getenv("SOURCEDIR")), "${SOURCEDIR}")
sys.stdout.push_filter("invalid option.*", "invalid option")
sys.stdout.push_filter("unrecognized option.*", "unrecognized option")
server.test_option("-Z")
server.test_option("--no-such-option")
server.test_option("--no-such-option --version")
sys.stdout.push_filter(".* (\d+)\.\d+\.\d+(-\w+)?(-\d+)?(-\w+)?", "Tarantool \\1.minor.patch-<suffix>-<rev>-<commit>")
sys.stdout.push_filter("Target: .*", "Target: platform <build>")
sys.stdout.push_filter(".*Disable shared arena since.*\n", "")
sys.stdout.push_filter("Build options: .*", "Build options: flags")
sys.stdout.push_filter("C_FLAGS:.*", "C_FLAGS: flags")
sys.stdout.push_filter("CXX_FLAGS:.*", "CXX_FLAGS: flags")
sys.stdout.push_filter("Compiler: .*", "Compiler: cc")
server.test_option("--version")
server.test_option("-v")
server.test_option("-V ")
script = os.getenv("SOURCEDIR") + "/test/box-py/args.lua"
server.test_option(script)
server.test_option(script + " 1 2 3")
server.test_option(script + " 1 2 3 -V")
server.test_option(script + " -V 1 2 3")
server.test_option(script + " 1 2 3 --help")
server.test_option(script + " --help 1 2 3")
server.test_option("-V " + script + " 1 2 3")
# gh-3966: os.exit() hangs if called by a command from the argument list.
server.test_option("-e 'print(1) os.exit() print(2)'")
server.test_option("-e 'print(1)' -e 'os.exit()' -e 'print(1)' -e 'os.exit()' -e 'print(1)'")
server.test_option("-e \"print('Hello')\" " + script + " 1 2 3")
server.test_option("-e 'a = 10' " + \
"-e print(a) " + \
script + \
" 1 2 3 --help")
server.test_option("-e \"print(rawget(_G, 'log') == nil)\" " + \
"-e io.flush() " + \
"-l log " + \
"-e \"print(log.info('Hello'))\" " + \
script + \
" 1 2 3 --help")
sys.stdout.clear_all_filters()
|
py | b41257a0f7a1298f67804cf06ed600e6797e3114 | __all__ = [
"BusinessDaysOperatorType",
"BusinessDaysFilterType",
"BusinessDates",
"BusinessFilterMethod",
"BusinessHolidayType",
"PeriodType",
"WeekdaysType",
]
from libtad.datatypes.business.business_days_operator_type import BusinessDaysOperatorType
from libtad.datatypes.business.business_days_filter_type import BusinessDaysFilterType
from libtad.datatypes.business.business_dates import BusinessDates
from libtad.datatypes.business.business_filter_method import BusinessFilterMethod
from libtad.datatypes.business.business_holiday_type import BusinessHolidayType
from libtad.datatypes.business.period_type import PeriodType
from libtad.datatypes.business.weekdays_type import WeekdaysType
def __dir__():
return __all__ |
py | b4125837d5caa9fb4f2f8c2ed828e877e0ce1d9b | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from ducktape.mark import matrix
from ducktape.mark.resource import cluster
from ducktape.tests.test import Test
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.version import LATEST_0_10_0, LATEST_0_10_1, LATEST_0_10_2, LATEST_0_11_0, LATEST_1_0, LATEST_1_1, \
LATEST_2_0, LATEST_2_1, LATEST_2_2, LATEST_2_3
from kafkatest.services.streams import CooperativeRebalanceUpgradeService
from kafkatest.tests.streams.utils import verify_stopped, stop_processors, verify_running
class StreamsCooperativeRebalanceUpgradeTest(Test):
"""
Test of a rolling upgrade from eager rebalance to
cooperative rebalance
"""
source_topic = "source"
sink_topic = "sink"
task_delimiter = "#"
report_interval = "1000"
processing_message = "Processed [0-9]* records so far"
stopped_message = "COOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED"
running_state_msg = "STREAMS in a RUNNING State"
cooperative_turned_off_msg = "Eager rebalancing protocol is enabled now for upgrade from %s"
cooperative_enabled_msg = "Cooperative rebalancing protocol is enabled now"
first_bounce_phase = "first_bounce_phase-"
second_bounce_phase = "second_bounce_phase-"
# !!CAUTION!!: THIS LIST OF VERSIONS IS FIXED, NO VERSIONS MUST BE ADDED
streams_eager_rebalance_upgrade_versions = [str(LATEST_0_10_0), str(LATEST_0_10_1), str(LATEST_0_10_2), str(LATEST_0_11_0),
str(LATEST_1_0), str(LATEST_1_1), str(LATEST_2_0), str(LATEST_2_1), str(LATEST_2_2),
str(LATEST_2_3)]
def __init__(self, test_context):
super(StreamsCooperativeRebalanceUpgradeTest, self).__init__(test_context)
self.topics = {
self.source_topic: {'partitions': 9},
self.sink_topic: {'partitions': 9}
}
self.zookeeper = ZookeeperService(self.test_context, num_nodes=1)
self.kafka = KafkaService(self.test_context, num_nodes=3,
zk=self.zookeeper, topics=self.topics)
self.producer = VerifiableProducer(self.test_context,
1,
self.kafka,
self.source_topic,
throughput=1000,
acks=1)
@cluster(num_nodes=8)
@matrix(upgrade_from_version=streams_eager_rebalance_upgrade_versions)
def test_upgrade_to_cooperative_rebalance(self, upgrade_from_version):
self.zookeeper.start()
self.kafka.start()
processor1 = CooperativeRebalanceUpgradeService(self.test_context, self.kafka)
processor2 = CooperativeRebalanceUpgradeService(self.test_context, self.kafka)
processor3 = CooperativeRebalanceUpgradeService(self.test_context, self.kafka)
processors = [processor1, processor2, processor3]
# produce records continually during the test
self.producer.start()
# start all processors without upgrade_from config; normal operations mode
self.logger.info("Starting all streams clients in normal running mode")
for processor in processors:
processor.set_version(upgrade_from_version)
self.set_props(processor)
processor.CLEAN_NODE_ENABLED = False
# can't use state as older version don't have state listener
# so just verify up and running
verify_running(processor, self.processing_message)
# all running rebalancing has ceased
for processor in processors:
self.verify_processing(processor, self.processing_message)
# first rolling bounce with "upgrade.from" config set
previous_phase = ""
self.maybe_upgrade_rolling_bounce_and_verify(processors,
previous_phase,
self.first_bounce_phase,
upgrade_from_version)
# All nodes processing, rebalancing has ceased
for processor in processors:
self.verify_processing(processor, self.first_bounce_phase + self.processing_message)
# second rolling bounce without "upgrade.from" config
self.maybe_upgrade_rolling_bounce_and_verify(processors,
self.first_bounce_phase,
self.second_bounce_phase)
# All nodes processing, rebalancing has ceased
for processor in processors:
self.verify_processing(processor, self.second_bounce_phase + self.processing_message)
# now verify tasks are unique
for processor in processors:
self.get_tasks_for_processor(processor)
self.logger.info("Active tasks %s" % processor.active_tasks)
        overlapping_tasks = processor1.active_tasks.intersection(processor2.active_tasks)
        assert len(overlapping_tasks) == 0, \
            "Final task assignments are not unique %s %s" % (processor1.active_tasks, processor2.active_tasks)
        overlapping_tasks = processor1.active_tasks.intersection(processor3.active_tasks)
        assert len(overlapping_tasks) == 0, \
            "Final task assignments are not unique %s %s" % (processor1.active_tasks, processor3.active_tasks)
        overlapping_tasks = processor2.active_tasks.intersection(processor3.active_tasks)
        assert len(overlapping_tasks) == 0, \
            "Final task assignments are not unique %s %s" % (processor2.active_tasks, processor3.active_tasks)
# test done close all down
stop_processors(processors, self.second_bounce_phase + self.stopped_message)
self.producer.stop()
self.kafka.stop()
self.zookeeper.stop()
def maybe_upgrade_rolling_bounce_and_verify(self,
processors,
previous_phase,
current_phase,
upgrade_from_version=None):
for processor in processors:
# stop the processor in prep for setting "update.from" or removing "update.from"
verify_stopped(processor, previous_phase + self.stopped_message)
# upgrade to version with cooperative rebalance
processor.set_version("")
processor.set_upgrade_phase(current_phase)
if upgrade_from_version is not None:
# need to remove minor version numbers for check of valid upgrade from numbers
upgrade_version = upgrade_from_version[:upgrade_from_version.rfind('.')]
rebalance_mode_msg = self.cooperative_turned_off_msg % upgrade_version
else:
upgrade_version = None
rebalance_mode_msg = self.cooperative_enabled_msg
self.set_props(processor, upgrade_version)
node = processor.node
with node.account.monitor_log(processor.STDOUT_FILE) as stdout_monitor:
with node.account.monitor_log(processor.LOG_FILE) as log_monitor:
processor.start()
# verify correct rebalance mode either turned off for upgrade or enabled after upgrade
log_monitor.wait_until(rebalance_mode_msg,
timeout_sec=60,
err_msg="Never saw '%s' message " % rebalance_mode_msg + str(processor.node.account))
# verify rebalanced into a running state
rebalance_msg = current_phase + self.running_state_msg
stdout_monitor.wait_until(rebalance_msg,
timeout_sec=60,
err_msg="Never saw '%s' message " % rebalance_msg + str(
processor.node.account))
# verify processing
verify_processing_msg = current_phase + self.processing_message
stdout_monitor.wait_until(verify_processing_msg,
timeout_sec=60,
err_msg="Never saw '%s' message " % verify_processing_msg + str(
processor.node.account))
def verify_processing(self, processor, pattern):
self.logger.info("Verifying %s processing pattern in STDOUT_FILE" % pattern)
with processor.node.account.monitor_log(processor.STDOUT_FILE) as monitor:
monitor.wait_until(pattern,
timeout_sec=60,
err_msg="Never saw processing of %s " % pattern + str(processor.node.account))
def get_tasks_for_processor(self, processor):
retries = 0
while retries < 5:
found_tasks = list(processor.node.account.ssh_capture("grep TASK-ASSIGNMENTS %s | tail -n 1" % processor.STDOUT_FILE, allow_fail=True))
self.logger.info("Returned %s from assigned task check" % found_tasks)
if len(found_tasks) > 0:
task_string = str(found_tasks[0]).strip()
self.logger.info("Converted %s from assigned task check" % task_string)
processor.set_tasks(task_string)
return
retries += 1
time.sleep(1)
return
def set_props(self, processor, upgrade_from=None):
processor.SOURCE_TOPIC = self.source_topic
processor.SINK_TOPIC = self.sink_topic
processor.REPORT_INTERVAL = self.report_interval
processor.UPGRADE_FROM = upgrade_from
|
py | b4125b6e4a92da19fcdd860cadb63e549bf58ca2 |
#Given a number of integers, combine them so it would create the largest number.
#Example:
#Input: [17, 7, 2, 45, 72]
#Output: 77245217
#Analysis
#Sort the array with a custom comparer:
#  (A, B) -> merge(A, B) / 10**digits(merge(A, B)) - merge(B, A) / 10**digits(merge(B, A))
#  Normalizing each concatenation by its digit count keeps longer numbers
#  from dominating the comparison purely by magnitude.
#After sorting, merge all numbers left to right.
#Time cost: O(N log N) for sorting. Space cost: O(1).
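# Worked example: for A=7, B=45 the merges are 745 and 457; normalized by
# digit count that is 0.745 vs 0.457, so 7 sorts ahead of 45 and the final
# reduce produces 745 rather than 457.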
from functools import cmp_to_key, reduce
from math import log
class Solution:
def largestNum(self, nums):
nums = sorted(nums, key=cmp_to_key(self.__compare))
result = reduce(lambda A,B : self.__merge(A,B), nums)
return int(result)
    def __compare(self, B, A):
        # cmp_to_key passes (first, second); returning a negative value sorts
        # `B` earlier, which happens exactly when merge(B, A) > merge(A, B)
left = self.__merge(A, B)
right = self.__merge(B, A)
lDigit = self.__digits(left)
rDigit = self.__digits(right)
return left/(10**lDigit) - right/(10**rDigit)
def __digits(self, n):
return (log(n) // log (10))
def __merge(self, n1, n2):
if n2 > 0:
numDigits = 1 + self.__digits(n2)
else:
numDigits = 0
return n1 * (10 ** numDigits) + n2
def largestNum(nums):
    solu = Solution()
    return solu.largestNum(nums)
if __name__ == "__main__":
print(largestNum([7, 45, 72]))
print(largestNum([ 9, 45, 822]))
print (largestNum([17, 7, 2, 45, 72]) )
# 77245217 |
py | b4125b7373edff6a3c8648c5f92833ec8b984641 | # Copyright 2015. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from cfgm_common import exceptions as vnc_exc
import eventlet
import netaddr
try:
from neutron_lib import constants
except ImportError:
from neutron.plugins.common import constants
try:
from oslo.config import cfg
except ImportError:
from oslo_config import cfg
from vnc_api import vnc_api
import contrail_res_handler as res_handler
import fip_res_handler
import sg_res_handler as sg_handler
import subnet_res_handler as subnet_handler
import vn_res_handler as vn_handler
class VMInterfaceMixin(object):
@staticmethod
def _port_fixed_ips_is_present(check, against):
# filters = {'fixed_ips': {'ip_address': ['20.0.0.5', '20.0.0.6']}}
# check = {'ip_address': ['20.0.0.5', '20.0.0.6']}
# against = [{'subnet_id': 'uuid', 'ip_address': u'20.0.0.5'}]
for item in against:
result = True
for k in item.keys():
if k in check and item[k] not in check[k]:
result = False
if result:
return True
return False
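        # e.g. with the sample filter above, 'subnet_id' is absent from
        # `check` and is skipped, '20.0.0.5' is in the filter list, so the
        # entry matches and True is returned.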
@staticmethod
def _get_vmi_memo_req_dict(vn_objs, iip_objs, vm_objs):
memo_req = {'networks': {},
'subnets': {},
'virtual-machines': {},
'instance-ips': {}}
for vn_obj in vn_objs or []:
memo_req['networks'][vn_obj.uuid] = vn_obj
memo_req['subnets'][vn_obj.uuid] = (
subnet_handler.SubnetHandler.get_vn_subnets(vn_obj))
for iip_obj in iip_objs or []:
memo_req['instance-ips'][iip_obj.uuid] = iip_obj
for vm_obj in vm_objs or []:
memo_req['virtual-machines'][vm_obj.uuid] = vm_obj
return memo_req
@staticmethod
def _get_extra_dhcp_opts(vmi_obj):
dhcp_options_list = (
vmi_obj.get_virtual_machine_interface_dhcp_option_list())
if dhcp_options_list and dhcp_options_list.dhcp_option:
dhcp_options = []
for dhcp_option in dhcp_options_list.dhcp_option:
pair = {'opt_value': dhcp_option.dhcp_option_value,
'opt_name': dhcp_option.dhcp_option_name}
dhcp_options.append(pair)
return dhcp_options
@staticmethod
def _get_allowed_adress_pairs(vmi_obj):
allowed_address_pairs = (
vmi_obj.get_virtual_machine_interface_allowed_address_pairs())
if (allowed_address_pairs and
allowed_address_pairs.allowed_address_pair):
address_pairs = []
for aap in allowed_address_pairs.allowed_address_pair:
pair = {}
pair['mac_address'] = aap.mac
if aap.ip.get_ip_prefix_len() == 32:
pair['ip_address'] = '%s' % (aap.ip.get_ip_prefix())
else:
pair['ip_address'] = '%s/%s' % (aap.ip.get_ip_prefix(),
aap.ip.get_ip_prefix_len())
address_pairs.append(pair)
return address_pairs
@staticmethod
def _ip_address_to_subnet_id(ip_addr, vn_obj, memo_req):
subnets_info = memo_req['subnets'].get(vn_obj.uuid)
for subnet_info in subnets_info or []:
if (netaddr.IPAddress(ip_addr) in
netaddr.IPSet([subnet_info['cidr']])):
return subnet_info['id']
ipam_refs = vn_obj.get_network_ipam_refs()
for ipam_ref in ipam_refs or []:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
subnet_vnc.subnet.get_ip_prefix_len())
if netaddr.IPAddress(ip_addr) in netaddr.IPSet([cidr]):
return subnet_vnc.subnet_uuid
def get_vmi_ip_dict(self, vmi_obj, vn_obj, port_req_memo):
ip_dict_list = []
ip_back_refs = getattr(vmi_obj, 'instance_ip_back_refs', None)
for ip_back_ref in ip_back_refs or []:
iip_uuid = ip_back_ref['uuid']
# fetch it from request context cache/memo if there
try:
ip_obj = port_req_memo['instance-ips'][iip_uuid]
except KeyError:
try:
ip_obj = self._vnc_lib.instance_ip_read(id=iip_uuid)
except vnc_exc.NoIdError:
continue
ip_addr = ip_obj.get_instance_ip_address()
subnet_id = getattr(ip_obj, 'subnet_uuid', None)
if not subnet_id:
subnet_id = self._ip_address_to_subnet_id(ip_addr, vn_obj,
port_req_memo)
ip_obj.set_subnet_uuid(subnet_id)
self._vnc_lib.instance_ip_update(ip_obj)
ip_q_dict = {'ip_address': ip_addr,
'subnet_id': subnet_id}
ip_dict_list.append(ip_q_dict)
return ip_dict_list
def get_vmi_net_id(self, vmi_obj):
net_refs = vmi_obj.get_virtual_network_refs()
if net_refs:
return net_refs[0]['uuid']
def _extract_gw_id_from_vm_fq_name(self, vm_fq_name_str):
"""Extract the gateway id from vm fq name.
        E.g. the vm fq name will be of the format:
"default-domain__demo__si_2d192e48-db2b-4978-8ee3-0454a0fa691d__1..."
Extract '2d192e48-db2b-4978-8ee3-0454a0fa691d' and return it
"""
try:
gw_id = vm_fq_name_str.split('si_')
return gw_id[1].split('__')[0]
except Exception:
# any exception return None
return None
def get_port_gw_id(self, vm_ref, port_req_memo):
# try to extract the gw id from the vm fq_name.
# read the vm and si object only if necessary
gw_id = self._extract_gw_id_from_vm_fq_name(vm_ref['to'][-1])
if gw_id:
return gw_id
vm_uuid = vm_ref['uuid']
vm_obj = None
vm_obj = port_req_memo['virtual-machines'].get(vm_uuid)
if vm_obj is None:
try:
vm_obj = self._vnc_lib.virtual_machine_read(id=vm_uuid)
except vnc_exc.NoIdError:
return None
port_req_memo['virtual-machines'][vm_uuid] = vm_obj
si_refs = vm_obj.get_service_instance_refs()
if not si_refs:
return None
try:
si_obj = self._vnc_lib.service_instance_read(
id=si_refs[0]['uuid'],
fields=["logical_router_back_refs"])
except vnc_exc.NoIdError:
return None
rtr_back_refs = getattr(si_obj, "logical_router_back_refs", None)
if rtr_back_refs:
return rtr_back_refs[0]['uuid']
def _get_vmi_device_id_owner(self, vmi_obj, port_req_memo):
# port can be router interface or vm interface
# for performance read logical_router_back_ref only when we have to
device_id = ''
device_owner = None
router_refs = getattr(vmi_obj, 'logical_router_back_refs', None)
if router_refs is not None:
device_id = router_refs[0]['uuid']
elif vmi_obj.parent_type == 'virtual-machine':
device_id = vmi_obj.parent_name
elif vmi_obj.get_virtual_machine_refs():
vm_ref = vmi_obj.get_virtual_machine_refs()[0]
if vm_ref['to'][-1] == vm_ref['uuid']:
device_id = vm_ref['to'][-1]
else:
# this is a router gw port. Get the router id
rtr_uuid = self.get_port_gw_id(vm_ref, port_req_memo)
if rtr_uuid:
device_id = rtr_uuid
device_owner = constants.DEVICE_OWNER_ROUTER_GW
return device_id, device_owner
def _get_port_bindings(self, vmi_obj):
vmi_bindings_kvps = vmi_obj.get_virtual_machine_interface_bindings()
if vmi_bindings_kvps:
vmi_bindings = vmi_bindings_kvps.exportDict(name_=None) or {}
else:
vmi_bindings = {}
ret_bindings = {}
        for k, v in vmi_bindings.items():
            ret_bindings['binding:%s' % k] = v
# 1. upgrade case, port created before bindings prop was
# defined on vmi OR
# 2. defaults for keys needed by neutron
try:
ret_bindings['binding:vif_details'] = vmi_bindings['vif_details']
except KeyError:
ret_bindings['binding:vif_details'] = {'port_filter': True}
try:
ret_bindings['binding:vif_type'] = vmi_bindings['vif_type']
except KeyError:
ret_bindings['binding:vif_type'] = 'vrouter'
try:
ret_bindings['binding:vnic_type'] = vmi_bindings['vnic_type']
except KeyError:
ret_bindings['binding:vnic_type'] = 'normal'
return ret_bindings
def _vmi_to_neutron_port(self, vmi_obj, port_req_memo=None,
extensions_enabled=False, fields=None):
port_q_dict = {}
if not getattr(vmi_obj, 'display_name'):
# for ports created directly via vnc_api
port_q_dict['name'] = vmi_obj.get_fq_name()[-1]
else:
port_q_dict['name'] = vmi_obj.display_name
port_q_dict['id'] = vmi_obj.uuid
net_id = self.get_vmi_net_id(vmi_obj)
if not net_id:
# TODO() hack to force network_id on default port
# as neutron needs it
net_id = self._vnc_lib.obj_to_id(vnc_api.VirtualNetwork())
if port_req_memo is None:
# create a memo only for this port's conversion in this method
port_req_memo = {}
if 'networks' not in port_req_memo:
port_req_memo['networks'] = {}
if 'subnets' not in port_req_memo:
port_req_memo['subnets'] = {}
if 'virtual-machines' not in port_req_memo:
port_req_memo['virtual-machines'] = {}
try:
vn_obj = port_req_memo['networks'][net_id]
except KeyError:
vn_obj = self._vnc_lib.virtual_network_read(id=net_id)
port_req_memo['networks'][net_id] = vn_obj
subnets_info = (
subnet_handler.SubnetHandler.get_vn_subnets(vn_obj))
port_req_memo['subnets'][net_id] = subnets_info
if vmi_obj.parent_type != "project":
proj_id = self._project_id_vnc_to_neutron(vn_obj.parent_uuid)
else:
proj_id = self._project_id_vnc_to_neutron(vmi_obj.parent_uuid)
port_q_dict['tenant_id'] = proj_id
port_q_dict['network_id'] = net_id
# TODO() RHS below may need fixing
port_q_dict['mac_address'] = ''
mac_refs = vmi_obj.get_virtual_machine_interface_mac_addresses()
if mac_refs:
port_q_dict['mac_address'] = mac_refs.mac_address[0]
extra_dhcp_opts = self._get_extra_dhcp_opts(vmi_obj)
if extra_dhcp_opts:
port_q_dict['extra_dhcp_opts'] = extra_dhcp_opts
        address_pairs = self._get_allowed_address_pairs(vmi_obj)
if address_pairs:
port_q_dict['allowed_address_pairs'] = address_pairs
port_q_dict['fixed_ips'] = self.get_vmi_ip_dict(vmi_obj, vn_obj,
port_req_memo)
port_q_dict['security_groups'] = []
sg_refs = vmi_obj.get_security_group_refs()
# read the no rule sg
no_rule_sg = res_handler.SGHandler(
self._vnc_lib).get_no_rule_security_group()
for sg_ref in sg_refs or []:
if no_rule_sg and sg_ref['uuid'] == no_rule_sg.uuid:
# hide the internal sg
continue
port_q_dict['security_groups'].append(sg_ref['uuid'])
port_q_dict['admin_state_up'] = vmi_obj.get_id_perms().enable
device_id, device_owner = self._get_vmi_device_id_owner(vmi_obj,
port_req_memo)
port_q_dict['device_id'] = device_id
if device_owner is not None:
port_q_dict['device_owner'] = device_owner
else:
port_q_dict['device_owner'] = (
vmi_obj.get_virtual_machine_interface_device_owner() or '')
if port_q_dict['device_id']:
port_q_dict['status'] = constants.PORT_STATUS_ACTIVE
else:
port_q_dict['status'] = constants.PORT_STATUS_DOWN
if extensions_enabled:
extra_dict = {'fq_name': vmi_obj.get_fq_name()}
port_q_dict.update(extra_dict)
bindings_dict = self._get_port_bindings(vmi_obj)
            for k, v in bindings_dict.items():
                port_q_dict[k] = v
if fields:
port_q_dict = self._filter_res_dict(port_q_dict, fields)
return port_q_dict
def _set_vm_instance_for_vmi(self, vmi_obj, instance_name):
"""Set vm instance for the vmi.
This function also deletes the old virtual_machine object
associated with the vmi (if any) after the new virtual_machine
object is associated with it.
"""
vm_refs = vmi_obj.get_virtual_machine_refs()
delete_vm_list = []
for vm_ref in vm_refs or []:
if vm_ref['to'] != [instance_name]:
delete_vm_list.append(vm_ref)
if instance_name or delete_vm_list:
vm_handler = res_handler.VMachineHandler(self._vnc_lib)
if instance_name:
try:
instance_obj = vm_handler.ensure_vm_instance(instance_name)
vmi_obj.set_virtual_machine(instance_obj)
except vnc_exc.RefsExistError as e:
self._raise_contrail_exception(
'BadRequest', resource='port', msg=str(e))
except vnc_exc.NoIdError:
self._raise_contrail_exception(
'DeviceIDNotOwnedByTenant', resource='port',
device_id=instance_name)
else:
vmi_obj.set_virtual_machine_list([])
if delete_vm_list:
self._vnc_lib.virtual_machine_interface_update(vmi_obj)
for vm_ref in delete_vm_list:
try:
vm_handler._resource_delete(id=vm_ref['uuid'])
except vnc_exc.RefsExistError:
pass
def _set_vmi_security_groups(self, vmi_obj, sec_group_list):
vmi_obj.set_security_group_list([])
        # When there is no security group for a port, the internal
        # no_rule group should be used.
if not sec_group_list:
sg_obj = res_handler.SGHandler(
self._vnc_lib).get_no_rule_security_group()
vmi_obj.add_security_group(sg_obj)
for sg_id in sec_group_list or []:
# TODO() optimize to not read sg (only uuid/fqn needed)
sg_obj = self._vnc_lib.security_group_read(id=sg_id)
vmi_obj.add_security_group(sg_obj)
def _set_vmi_extra_dhcp_options(self, vmi_obj, extra_dhcp_options):
dhcp_options = []
for option_pair in extra_dhcp_options or []:
option = vnc_api.DhcpOptionType(
dhcp_option_name=option_pair['opt_name'],
dhcp_option_value=option_pair['opt_value'])
dhcp_options.append(option)
if dhcp_options:
olist = vnc_api.DhcpOptionsListType(dhcp_options)
vmi_obj.set_virtual_machine_interface_dhcp_option_list(olist)
else:
vmi_obj.set_virtual_machine_interface_dhcp_option_list(None)
def _set_vmi_allowed_addr_pairs(self, vmi_obj, allowed_addr_pairs):
aap_array = []
for address_pair in allowed_addr_pairs or []:
mode = u'active-standby'
if 'mac_address' not in address_pair:
address_pair['mac_address'] = ""
cidr = address_pair['ip_address'].split('/')
if len(cidr) == 1:
subnet = vnc_api.SubnetType(cidr[0], 32)
elif len(cidr) == 2:
subnet = vnc_api.SubnetType(cidr[0], int(cidr[1]))
else:
self._raise_contrail_exception(
'BadRequest', resource='port',
msg='Invalid address pair argument')
aap_array.append(vnc_api.AllowedAddressPair(
subnet,
address_pair['mac_address'], mode))
aaps = vnc_api.AllowedAddressPairs()
if aap_array:
aaps.set_allowed_address_pair(aap_array)
vmi_obj.set_virtual_machine_interface_allowed_address_pairs(aaps)
def _get_vmi_ip_list(self, vmi_obj):
ip_back_refs = getattr(vmi_obj, 'instance_ip_back_refs', None)
vmi_obj_ips = []
if ip_back_refs:
ip_handler = res_handler.InstanceIpHandler(self._vnc_lib)
for ip_back_ref in ip_back_refs:
try:
ip_obj = ip_handler.get_iip_obj(id=ip_back_ref['uuid'])
except vnc_exc.NoIdError:
continue
vmi_obj_ips.append(ip_obj.get_instance_ip_address())
return vmi_obj_ips
def _check_vmi_fixed_ips(self, vmi_obj, fixed_ips, net_id):
vmi_obj_ips = self._get_vmi_ip_list(vmi_obj)
ip_handler = res_handler.InstanceIpHandler(self._vnc_lib)
for fixed_ip in fixed_ips or []:
ip_addr = fixed_ip.get('ip_address')
if not ip_addr or ip_addr in vmi_obj_ips:
continue
if ip_handler.is_ip_addr_in_net_id(ip_addr, net_id):
self._raise_contrail_exception(
'IpAddressInUse', net_id=net_id,
ip_address=ip_addr, resource='port')
def _neutron_port_to_vmi(self, port_q, vmi_obj=None, update=False):
if 'name' in port_q and port_q['name']:
vmi_obj.display_name = port_q['name']
device_owner = port_q.get('device_owner')
if (device_owner not in [constants.DEVICE_OWNER_ROUTER_INTF,
constants.DEVICE_OWNER_ROUTER_GW]
and 'device_id' in port_q):
self._set_vm_instance_for_vmi(vmi_obj, port_q.get('device_id'))
if device_owner is not None:
vmi_obj.set_virtual_machine_interface_device_owner(device_owner)
if ('mac_address' in port_q and port_q['mac_address']):
mac_addrs_obj = vnc_api.MacAddressesType()
mac_addrs_obj.set_mac_address([port_q['mac_address']])
vmi_obj.set_virtual_machine_interface_mac_addresses(mac_addrs_obj)
if 'security_groups' in port_q:
self._set_vmi_security_groups(vmi_obj,
port_q.get('security_groups'))
if 'admin_state_up' in port_q:
id_perms = vmi_obj.get_id_perms()
id_perms.enable = port_q['admin_state_up']
vmi_obj.set_id_perms(id_perms)
if 'extra_dhcp_opts' in port_q:
self._set_vmi_extra_dhcp_options(vmi_obj,
port_q.get('extra_dhcp_opts'))
if ('allowed_address_pairs' in port_q):
self._set_vmi_allowed_addr_pairs(
vmi_obj, port_q.get('allowed_address_pairs'))
if 'fixed_ips' in port_q:
net_id = (port_q.get('network_id') or
vmi_obj.get_virtual_network_refs()[0]['uuid'])
self._check_vmi_fixed_ips(vmi_obj, port_q.get('fixed_ips'), net_id)
# pick binding keys from neutron repr and persist as kvp elements.
# it is assumed allowing/denying oper*key is done at neutron-server.
if not update:
            vmi_binding_kvps = dict((k.replace('binding:', ''), v)
                                    for k, v in port_q.items()
                                    if k.startswith('binding:'))
            vmi_obj.set_virtual_machine_interface_bindings(
                vnc_api.KeyValuePairs([vnc_api.KeyValuePair(k, v)
                                       for k, v in vmi_binding_kvps.items()]))
        else:
            vmi_binding_kvps = dict((k.replace('binding:', ''), v)
                                    for k, v in port_q.items()
                                    if k.startswith('binding:'))
            for k, v in vmi_binding_kvps.items():
                vmi_obj.add_virtual_machine_interface_bindings(
                    vnc_api.KeyValuePair(key=k, value=v))
return vmi_obj
def _create_instance_ips(self, vn_obj, vmi_obj, fixed_ips, ip_family="v4"):
if fixed_ips is None:
return
# 1. find existing ips on port
# 2. add new ips on port from update body
# 3. delete old/stale ips on port
subnets = dict()
ipam_refs = vn_obj.get_network_ipam_refs()
for ipam_ref in ipam_refs or []:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
subnet_vnc.subnet.get_ip_prefix_len())
subnets[subnet_vnc.subnet_uuid] = cidr
stale_ip_ids = {}
ip_handler = res_handler.InstanceIpHandler(self._vnc_lib)
for iip in getattr(vmi_obj, 'instance_ip_back_refs', []):
iip_obj = ip_handler.get_iip_obj(id=iip['uuid'])
ip_addr = iip_obj.get_instance_ip_address()
stale_ip_ids[ip_addr] = iip['uuid']
created_iip_ids = []
for fixed_ip in fixed_ips:
try:
ip_addr = fixed_ip.get('ip_address')
if ip_addr is not None:
try:
# this ip survives to next gen
del stale_ip_ids[ip_addr]
continue
except KeyError:
pass
if netaddr.IPAddress(ip_addr).version == 4:
ip_family = "v4"
elif netaddr.IPAddress(ip_addr).version == 6:
ip_family = "v6"
subnet_id = fixed_ip.get('subnet_id')
if subnet_id and subnet_id not in subnets:
for iip_id in created_iip_ids:
ip_handler._resource_delete(id=iip_id)
self._raise_contrail_exception(
'BadRequest',
msg='Subnet invalid for network', resource='port')
ip_family = fixed_ip.get('ip_family', ip_family)
ip_id = ip_handler.create_instance_ip(vn_obj, vmi_obj, ip_addr,
subnet_id, ip_family)
created_iip_ids.append(ip_id)
except vnc_exc.HttpError as e:
# Resources are not available
for iip_id in created_iip_ids:
ip_handler._resource_delete(id=iip_id)
if e.status_code == 400:
if 'subnet_id' in fixed_ip:
self._raise_contrail_exception(
'InvalidIpForSubnet',
ip_address=fixed_ip.get('ip_address'),
resource='port')
else:
self._raise_contrail_exception(
'InvalidIpForNetwork',
ip_address=fixed_ip.get('ip_address'),
resource='port')
else:
self._raise_contrail_exception(
'IpAddressGenerationFailure',
net_id=vn_obj.get_uuid(), resource='port')
except vnc_exc.PermissionDenied:
self._raise_contrail_exception(
'IpAddressInUse', net_id=vn_obj.get_uuid(),
ip_address=fixed_ip.get('ip_address'), resource='port')
        for stale_id in stale_ip_ids.values():
            ip_handler.delete_iip_obj(stale_id)
def get_vmi_tenant_id(self, vmi_obj):
if vmi_obj.parent_type != "project":
net_id = vmi_obj.get_virtual_network_refs()[0]['uuid']
vn_get_handler = vn_handler.VNetworkGetHandler(self._vnc_lib)
vn_obj = vn_get_handler.get_vn_obj(id=net_id)
return vn_get_handler.get_vn_tenant_id(vn_obj)
return self._project_id_vnc_to_neutron(vmi_obj.parent_uuid)
def _validate_mac_address(self, project_id, net_id, mac_address):
ports = self._vnc_lib.virtual_machine_interfaces_list(
parent_id=project_id, back_ref_id=net_id, detail=True)
for port in ports:
            macs = port.get_virtual_machine_interface_mac_addresses()
            if not macs:
                # a VMI without mac addresses cannot clash
                continue
            for mac in macs.get_mac_address():
                if mac == mac_address:
                    self._raise_contrail_exception(
                        "MacAddressInUse", net_id=net_id, mac=mac_address,
                        resource='port')
class VMInterfaceCreateHandler(res_handler.ResourceCreateHandler,
VMInterfaceMixin):
resource_create_method = 'virtual_machine_interface_create'
def _get_tenant_id_for_create(self, context, resource):
if context['is_admin'] and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context['tenant']):
reason = ('Cannot create resource for another tenant')
self._raise_contrail_exception('AdminRequired', reason=reason,
resource='port')
else:
tenant_id = context['tenant']
return tenant_id
def _create_vmi_obj(self, port_q, vn_obj):
project_id = self._project_id_neutron_to_vnc(port_q['tenant_id'])
try:
proj_obj = self._project_read(proj_id=project_id)
except vnc_exc.NoIdError:
self._raise_contrail_exception(
'ProjectNotFound',
                project_id=project_id, resource='port')
id_perms = vnc_api.IdPermsType(enable=True)
vmi_uuid = str(uuid.uuid4())
if port_q.get('name'):
vmi_name = port_q['name']
else:
vmi_name = vmi_uuid
vmi_obj = vnc_api.VirtualMachineInterface(vmi_name, proj_obj,
id_perms=id_perms)
vmi_obj.uuid = vmi_uuid
vmi_obj.set_virtual_network(vn_obj)
vmi_obj.set_security_group_list([])
        if ('security_groups' not in port_q or
                port_q['security_groups'].__class__ is object):
            # a bare object() value here corresponds to neutron's
            # "attribute not specified" sentinel, so fall back to the
            # project's default security group
sg_obj = vnc_api.SecurityGroup("default", proj_obj)
uid = sg_handler.SecurityGroupHandler(
self._vnc_lib)._ensure_default_security_group_exists(
proj_obj.uuid)
sg_obj.uuid = uid
vmi_obj.add_security_group(sg_obj)
return vmi_obj
def resource_create(self, context, port_q):
if 'network_id' not in port_q or 'tenant_id' not in port_q:
            self._raise_contrail_exception(
                'BadRequest', resource='port',
                msg="'tenant_id' and 'network_id' are mandatory")
net_id = port_q['network_id']
try:
vn_obj = self._vnc_lib.virtual_network_read(id=net_id)
except vnc_exc.NoIdError:
self._raise_contrail_exception(
'NetworkNotFound', net_id=net_id, resource='port')
tenant_id = self._get_tenant_id_for_create(context, port_q)
proj_id = self._project_id_neutron_to_vnc(tenant_id)
        # if mac-address is specified, check against the existing ports
# to see if there exists a port with the same mac-address
if 'mac_address' in port_q:
self._validate_mac_address(proj_id, net_id, port_q['mac_address'])
# initialize port object
vmi_obj = self._create_vmi_obj(port_q, vn_obj)
vmi_obj = self._neutron_port_to_vmi(port_q, vmi_obj=vmi_obj)
# determine creation of v4 and v6 ip object
ip_obj_v4_create = False
ip_obj_v6_create = False
fixed_ips = []
ipam_refs = vn_obj.get_network_ipam_refs() or []
for ipam_ref in ipam_refs:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
subnet_vnc.subnet.get_ip_prefix_len())
if not ip_obj_v4_create and (
netaddr.IPNetwork(cidr).version == 4):
ip_obj_v4_create = True
fixed_ips.append(
{'subnet_id': subnet_vnc.subnet_uuid,
'ip_family': 'v4'})
if not ip_obj_v6_create and (
netaddr.IPNetwork(cidr).version == 6):
ip_obj_v6_create = True
fixed_ips.append(
{'subnet_id': subnet_vnc.subnet_uuid,
'ip_family': 'v6'})
# create the object
port_id = self._resource_create(vmi_obj)
try:
if 'fixed_ips' in port_q:
self._create_instance_ips(vn_obj, vmi_obj, port_q['fixed_ips'])
elif vn_obj.get_network_ipam_refs():
self._create_instance_ips(vn_obj, vmi_obj, fixed_ips)
except Exception as e:
# failure in creating the instance ip. Roll back
self._resource_delete(id=port_id)
raise e
# TODO() below reads back default parent name, fix it
vmi_obj = self._resource_get(id=port_id,
fields=['instance_ip_back_refs'])
ret_port_q = self._vmi_to_neutron_port(vmi_obj)
return ret_port_q
class VMInterfaceUpdateHandler(res_handler.ResourceUpdateHandler,
VMInterfaceMixin):
resource_update_method = 'virtual_machine_interface_update'
def resource_update(self, context, port_id, port_q):
contrail_extensions_enabled = self._kwargs.get(
'contrail_extensions_enabled', False)
port_q['id'] = port_id
try:
vmi_obj = self._resource_get(id=port_q.get('id'),
fields=['instance_ip_back_refs'])
except vnc_exc.NoIdError:
            self._raise_contrail_exception(
                'PortNotFound', port_id=port_q.get('id'),
                resource='port')
net_id = vmi_obj.get_virtual_network_refs()[0]['uuid']
vn_obj = self._vnc_lib.virtual_network_read(id=net_id)
if port_q.get('mac_address'):
self._validate_mac_address(
vmi_obj.parent_uuid,
net_id, port_q['mac_address'])
vmi_obj = self._neutron_port_to_vmi(port_q, vmi_obj=vmi_obj,
update=True)
if 'fixed_ips' in port_q:
self._create_instance_ips(vn_obj, vmi_obj, port_q['fixed_ips'])
self._resource_update(vmi_obj)
vmi_obj = self._resource_get(id=port_id,
fields=['instance_ip_back_refs'])
ret_port_q = self._vmi_to_neutron_port(
vmi_obj, extensions_enabled=contrail_extensions_enabled)
return ret_port_q
class VMInterfaceDeleteHandler(res_handler.ResourceDeleteHandler,
VMInterfaceMixin):
resource_delete_method = 'virtual_machine_interface_delete'
def resource_delete(self, context, port_id):
try:
vmi_obj = self._resource_get(back_refs=True, id=port_id)
except vnc_exc.NoIdError:
            self._raise_contrail_exception(
                "PortNotFound", port_id=port_id, resource='port')
if vmi_obj.parent_type == 'virtual-machine':
instance_id = vmi_obj.parent_uuid
else:
vm_refs = vmi_obj.get_virtual_machine_refs()
if vm_refs:
instance_id = vm_refs[0]['uuid']
else:
instance_id = None
if vmi_obj.get_logical_router_back_refs():
self._raise_contrail_exception(
'PortInUse', port_id=port_id,
net_id=self.get_vmi_net_id(vmi_obj),
device_id=instance_id,
resource='port')
# release instance IP address
iip_back_refs = list((getattr(vmi_obj, 'instance_ip_back_refs', [])))
ip_handler = res_handler.InstanceIpHandler(self._vnc_lib)
for iip_back_ref in iip_back_refs or []:
# if name contains IP address then this is shared ip
iip_obj = ip_handler.get_iip_obj(id=iip_back_ref['uuid'])
# in case of shared ip only delete the link to the VMI
iip_obj.del_virtual_machine_interface(vmi_obj)
if not iip_obj.get_virtual_machine_interface_refs():
ip_handler._resource_delete(id=iip_back_ref['uuid'])
else:
ip_handler._resource_update(iip_obj)
# disassociate any floating IP used by instance
fip_back_refs = getattr(vmi_obj, 'floating_ip_back_refs', None)
if fip_back_refs:
fip_handler = fip_res_handler.FloatingIpHandler(self._vnc_lib)
for fip_back_ref in fip_back_refs:
fip_handler.resource_update(context, fip_back_ref['uuid'],
{'port_id': None})
self._resource_delete(id=port_id)
        # delete any interface route table associated with the port
for rt_ref in vmi_obj.get_interface_route_table_refs() or []:
try:
self._vnc_lib.interface_route_table_delete(id=rt_ref['uuid'])
except vnc_exc.NoIdError:
pass
# delete instance if this was the last port
try:
if instance_id:
self._vnc_lib.virtual_machine_delete(id=instance_id)
except vnc_exc.RefsExistError:
pass
class VMInterfaceGetHandler(res_handler.ResourceGetHandler, VMInterfaceMixin):
resource_list_method = 'virtual_machine_interfaces_list'
resource_get_method = 'virtual_machine_interface_read'
back_ref_fields = ['logical_router_back_refs', 'instance_ip_back_refs',
'floating_ip_back_refs']
# returns vm objects, net objects, and instance ip objects
def _get_vmis_nets_ips(self, context, project_ids=None,
device_ids=None, vmi_uuids=None, vn_ids=None):
vn_list_handler = vn_handler.VNetworkGetHandler(self._vnc_lib)
pool = eventlet.GreenPool()
vn_objs_t = pool.spawn(vn_list_handler.get_vn_obj_list,
parent_id=project_ids, detail=True)
vmi_objs_t = None
vmi_obj_uuids_t = None
back_ref_id = []
if device_ids:
back_ref_id = device_ids
if vn_ids:
back_ref_id.extend(vn_ids)
if back_ref_id:
vmi_objs_t = pool.spawn(self._resource_list,
back_ref_id=back_ref_id, back_refs=True)
if vmi_uuids:
vmi_obj_uuids_t = pool.spawn(self._resource_list,
obj_uuids=vmi_uuids, back_refs=True)
elif not back_ref_id:
vmi_objs_t = pool.spawn(self._resource_list,
parent_id=project_ids, back_refs=True)
pool.waitall()
vn_objs = vn_objs_t._exit_event._result
vmi_objs = []
if vmi_objs_t is not None:
vmi_objs = vmi_objs_t._exit_event._result or []
if vmi_obj_uuids_t is not None:
vmi_objs.extend(vmi_obj_uuids_t._exit_event._result or [])
vmis_ids = [vmi.uuid for vmi in vmi_objs]
iip_list_handler = res_handler.InstanceIpHandler(self._vnc_lib)
iips_objs = iip_list_handler.get_iip_obj_list(back_ref_id=vmis_ids,
detail=True)
return vmi_objs, vn_objs, iips_objs
# get vmi related resources filtered by project_ids
def _get_vmi_resources(self, context, project_ids=None, ids=None,
device_ids=None, vn_ids=None):
if device_ids:
rtr_objs = self._vnc_lib.logical_routers_list(obj_uuids=device_ids,
detail=True)
if not ids:
ids = []
for rtr_obj in rtr_objs or []:
intfs = rtr_obj.get_virtual_machine_interface_refs()
for intf in intfs or []:
ids.append(intf['uuid'])
return self._get_vmis_nets_ips(context, project_ids=project_ids,
device_ids=device_ids,
vmi_uuids=ids, vn_ids=vn_ids)
def _get_ports_dict(self, vmi_objs, memo_req, extensions_enabled=False):
ret_ports = []
for vmi_obj in vmi_objs or []:
try:
port_info = self._vmi_to_neutron_port(
vmi_obj, memo_req, extensions_enabled=extensions_enabled)
except vnc_exc.NoIdError:
continue
ret_ports.append(port_info)
return ret_ports
def get_vmi_list(self, **kwargs):
return self._resource_list(**kwargs)
def resource_list(self, context=None, filters=None, fields=None):
        if filters is None:
            filters = {}
        if (filters.get('device_owner') == 'network:dhcp' or
                'network:dhcp' in filters.get('device_owner', [])):
            return []
        if not context:
            context = {'is_admin': True}
        contrail_extensions_enabled = self._kwargs.get(
            'contrail_extensions_enabled', False)
project_ids = []
tenant_ids = []
if not context['is_admin']:
tenant_ids = [context['tenant']]
project_ids = [self._project_id_neutron_to_vnc(context['tenant'])]
elif 'tenant_id' in filters:
tenant_ids = filters['tenant_id']
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
# choose the most appropriate way of retrieving ports
# before pruning by other filters
if 'device_id' in filters:
vmi_objs, vn_objs, iip_objs = self._get_vmi_resources(
context, project_ids, device_ids=filters['device_id'],
vn_ids=filters.get('network_id'))
else:
vmi_objs, vn_objs, iip_objs = self._get_vmi_resources(
context, project_ids, ids=filters.get('id'),
vn_ids=filters.get('network_id'))
memo_req = self._get_vmi_memo_req_dict(vn_objs, iip_objs, None)
ports = self._get_ports_dict(
vmi_objs, memo_req,
extensions_enabled=contrail_extensions_enabled)
# prune phase
ret_ports = []
for port in ports:
if tenant_ids and port['tenant_id'] not in tenant_ids:
continue
# TODO(safchain) revisit these filters if necessary
if not self._filters_is_present(filters, 'name', port['name']):
continue
if not self._filters_is_present(
filters, 'device_owner', port['device_owner']):
continue
if 'fixed_ips' in filters and not self._port_fixed_ips_is_present(
filters['fixed_ips'], port['fixed_ips']):
continue
if fields:
port = self._filter_res_dict(port, fields)
ret_ports.append(port)
return ret_ports
def get_vmi_obj(self, vmi_id, fields=None):
return self._resource_get(id=vmi_id, fields=fields)
def resource_get(self, context, port_id, fields=None):
contrail_extensions_enabled = self._kwargs.get(
'contrail_extensions_enabled', False)
try:
vmi_obj = self._resource_get(id=port_id, back_refs=True)
except vnc_exc.NoIdError:
self._raise_contrail_exception('PortNotFound', port_id=port_id,
resource='port')
ret_port_q = self._vmi_to_neutron_port(
vmi_obj, extensions_enabled=contrail_extensions_enabled,
fields=fields)
return ret_port_q
def resource_count(self, context, filters=None):
count = self._resource_count_optimized(filters)
if count is not None:
return count
        if filters is None:
            filters = {}
        if (filters.get('device_owner') == 'network:dhcp' or
                'network:dhcp' in filters.get('device_owner', [])):
return 0
if 'tenant_id' in filters:
if isinstance(filters['tenant_id'], list):
project_id = self._project_id_neutron_to_vnc(
filters['tenant_id'][0])
else:
project_id = self._project_id_neutron_to_vnc(
filters['tenant_id'])
nports = len(self._resource_list(parent_id=project_id))
else:
# across all projects - TODO() very expensive,
# get only a count from api-server!
nports = len(self.resource_list(filters=filters))
return nports
class VMInterfaceHandler(VMInterfaceGetHandler,
VMInterfaceCreateHandler,
VMInterfaceDeleteHandler,
VMInterfaceUpdateHandler):
pass
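# --- Illustrative sketch; not part of the original file ---
# A plugin layer would typically instantiate the combined handler and route
# neutron port calls to it. The constructor wiring below is an assumption
# for demonstration only; resource_create/resource_delete are defined above.
#
#     handler = VMInterfaceHandler(vnc_lib)      # hypothetical wiring
#     port = handler.resource_create(context, {'network_id': net_id,
#                                              'tenant_id': tenant_id})
#     handler.resource_delete(context, port['id'])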
|
py | b4125da1ce73af73d9215fae5701332923306a47 |
class MetaHasFields(type):
"""Metaclass for classes that support named field definitions.
In Pale, this is particularly Endpoint and Resource. This metaclass is
here to populate the developer-specified fields on the class so that
simple API definition like the following is possible:
class MyEndpoint(Endpoint):
_method = 'GET'
_uri = '/hello'
name = StringArgument(
description="Who should we say Hello to?",
default="World")
_returns = RawStringResource("A 'Hello, world' string")
def _handle(self, context):
return "Hello, %s" % context.args.name
In the above case, this metaclass allows us to create a `_arguments` map
that would store {"name": MyEndpoint.name}.
"""
def __init__(cls, name, bases, classdict):
super(MetaHasFields, cls).__init__(name, bases, classdict)
cls._fix_up_fields()
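# --- Illustrative sketch; not part of the original Pale source ---
# Classes that use this metaclass are expected to provide `_fix_up_fields`.
# A minimal version might collect declarative field attributes into a
# per-class map such as `_arguments`; `BaseField` is a hypothetical marker.
class BaseField(object):
    """Hypothetical marker base class for declarative field objects."""
def _example_fix_up_fields(cls):
    # Gather every BaseField attribute declared on the class into a map
    # like {"name": MyEndpoint.name}.
    cls._arguments = dict((name, value) for name, value in vars(cls).items()
                          if isinstance(value, BaseField))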
|
py | b4125e2cc83a0ff3d3d7ad397c8bae80e7d3fbc0 | import unittest
import torch
from easy_bert.losses.crf_layer import CRF
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
self.crf = CRF(4)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.crf.to(self.device)
def test_decode(self):
print('test_decode~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
logits = torch.randn((2, 3, 4), requires_grad=False).to(self.device) # (batch_size, seq_len, num_label)
        mask = torch.IntTensor([[1, 1, 1], [1, 1, 0]]).to(self.device)  # the second sample has one padded position
labels, scores = self.crf.viterbi_decode(logits, mask)
print(labels)
def test_loss(self):
print('test_loss~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
logits = torch.randn((2, 3, 4), requires_grad=True).to(self.device) # (batch_size, seq_len, num_label)
        mask = torch.IntTensor([[1, 1, 1], [1, 1, 0]]).to(self.device)  # the second sample has one padded position
labels = torch.LongTensor([[0, 2, 3], [1, 0, 1]]).to(self.device)
loss = self.crf.forward(logits, labels, mask)
print(loss)
if __name__ == '__main__':
unittest.main()
|
py | b4125e60ecf7bc94fab88dff517d1d62baf8709c | import argparse
import os
import yaml
def add_root(path_from_here):
wd = os.path.abspath(os.getcwd())
root = os.path.join(wd, path_from_here)
return f'ROOT = "{root}"\n\n'
def create_class_str(class_name):
return f"class {class_name}:\n"
def create_class_var_str(k, v):
if isinstance(v, str):
return f"{k} = '{v}'\n"
else:
return f"{k} = {v}\n"
def generate_classes(k, v, is_sub_n):
out_str = is_sub_n * 4 * ' '
if isinstance(v, dict):
if k:
out_str += create_class_str(k)
else:
is_sub_n -= 1
for sk, sv in v.items():
out_str += generate_classes(sk, sv, is_sub_n + 1)
else:
out_str += create_class_var_str(k, v)
return out_str
def get_out_file_path(yaml_filepath):
out_file_path = yaml_filepath.split('/')
out_file_name = out_file_path.pop()
out_file_name = out_file_name[:out_file_name.find('.')] + '.py'
out_file_path.append(out_file_name)
return os.path.join(*out_file_path)
def load_yaml(yaml_file):
with open(yaml_file, 'r') as stream:
        yaml_conf = yaml.safe_load(stream)
return yaml_conf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"yaml",
metavar='path/to/config.yaml',
help="Path to your config yaml file"
)
parser.add_argument(
"--root",
help="Add path to root dir"
)
parser.add_argument(
"--name",
help="Name for base class",
)
args = parser.parse_args()
outfilepath = get_out_file_path(args.yaml)
class_str = '"""This is a file that was automatically generated using PyConfY"""\n\n'
if args.root:
class_str += add_root(args.root)
class_str += generate_classes(args.name, load_yaml(args.yaml), 0)
with open(outfilepath, 'w') as f:
f.write(class_str)
print(f"Config file written to {outfilepath}")
|
py | b412604995bce697b531fc0d40084fd5d420f191 | """
Rectangle packer using an algorithm by Javier Arevalo.
http://www.flipcode.com/archives/Rectangle_Placement.shtml
http://kossovsky.net/index.php/2009/07/cshar-rectangle-packing/
You have a bunch of rectangular pieces. You need to arrange them in a rectangular surface
so that they don't overlap, keeping the total area of the rectangle as small as possible.
This is fairly common when arranging characters in a bitmapped font, lightmaps for a 3D
engine, and I guess other situations as well.
The idea of this algorithm is that, as we add rectangles, we can pre-select "interesting"
places where we can try to add the next rectangles. For optimal results, the rectangles
should be added in order. I initially tried using area as a sorting criteria, but it
didn't work well with very tall or very flat rectangles. I then tried using the longest
dimension as a selector, and it worked much better. So much for intuition... These
"interesting" places are just to the right and just below the currently added rectangle.
The first rectangle, obviously, goes at the top left, the next one would go either to the
right or below this one, and so on. It is a weird way to do it, but it seems to work very
nicely. The way we search here is fairly brute-force, the fact being that for most
off-line purposes the performance seems more than adequate. I have generated a japanese
font with around 8500 characters and all the time was spent generating the bitmaps. Also,
for all we care, we could grow the parent rectangle in a different way than power of two.
It just happens that power of 2 is very convenient for graphics hardware textures. I'd be
interested in hearing of other approaches to this problem. Make sure to post them on
http://www.flipcode.com
Original code by Javier Arevalo (jare at iguanademos dot com). Rewritten
to C# / .NET by Markus Ewald (cygon at nuclex dot org).
C# code translated to Python and improved by leonardo maffi, V.1.0, Jul 12 2009.
This version is faster for few large rectangles.
-----------------
Nuclex Framework
Copyright (C) 2002-2009 Nuclex Development Labs
This library is free software; you can redistribute it and/or
modify it under the terms of the IBM Common Public License as
published by the IBM Corporation; either version 1.0 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
IBM Common Public License for more details.
You should have received a copy of the IBM Common Public
License along with this library.
"""
from bisect import insort
from array import array
class Anchor(object):
"""
Anchoring points are potential locations for the placement of new rectangles.
Each time a rectangle is inserted, an anchor point is generated on its upper
right end and another one at its lower left end. The anchor points are kept
in a list that is ordered by their closeness to the upper left corner of the
packing area (their 'rank') so the packer favors positions that are closer to
the upper left for new rectangles.
"""
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __lt__(self, other):
return self.x + self.y < other.x + other.y
class RectanglePacker(object):
def __init__(self, max_width, max_height):
"""
max_width, max_height: maximum width,height of the packing area.
"""
self._max_packing_area_width = max_width
self._max_packing_area_height = max_height
# Rectangles contained in the packing area
self._rectangles = []
# Anchoring points where new rectangles can potentially be placed
self._anchors = [Anchor(0, 0)]
# Current width,height of the packing area
self._actual_packing_area_width = 1
self._actual_packing_area_height = 1
# matrix of bits, to store where it's covered by rectangles
self._bitmatrix = array("B", [0]) * (max_width * max_height)
def actual_packing_area_width(self):
return self._actual_packing_area_width
def actual_packing_area_height(self):
return self._actual_packing_area_height
def pack(self, rect_width, rect_height):
"""
Tries to allocate space for a rectangle in the packing area.
rect_width, rect_height: width,height of the rectangle to allocate.
returns: an Anchor that represents the placement, if a space can be found.
Or None if no placement can be found.
"""
# Try to find an anchor where the rectangle fits in, enlarging the packing
# area and repeating the search recursively until it fits or the
# maximum allowed size is exceeded.
anchor_idx = self._select_anchor(rect_width, rect_height, self._actual_packing_area_width,
self._actual_packing_area_height)
# No anchor could be found at which the rectangle did fit in
if anchor_idx == -1:
return None
placement = self._anchors[anchor_idx]
# Move the rectangle either to the left or to the top until it collides with
# a neightbouring rectangle. This is done to combat the effect of lining up
# rectangles with gaps to the left or top of them because the anchor that
# would allow placement there has been blocked by another rectangle
self._optimize_placement(placement, rect_width, rect_height)
# Remove the used anchor and add new anchors at the upper right and lower left
# positions of the new rectangle
# The anchor is only removed if the placement optimization didn't
# move the rectangle so far that the anchor isn't blocked anymore
if ((placement.x + rect_width) > self._anchors[anchor_idx].x) and \
((placement.y + rect_height) > self._anchors[anchor_idx].y):
del self._anchors[anchor_idx]
# Add new anchors at the upper right and lower left coordinates of the rectangle
insort(self._anchors, Anchor(placement.x + rect_width, placement.y))
insort(self._anchors, Anchor(placement.x, placement.y + rect_height))
# Finally, we can add the rectangle to our packed rectangles list
self._rectangles.append([placement.x, placement.y, rect_width, rect_height])
shift = placement.y * self._max_packing_area_width
for y in range(rect_height):
start_line_pos = placement.x + shift
shift += self._max_packing_area_width
for i in range(start_line_pos, rect_width + start_line_pos):
self._bitmatrix[i] = 1
return placement
def _optimize_placement(self, placement, rect_width, rect_height):
"""
Optimizes the rectangle's placement by moving it either left or up to fill
any gaps resulting from rectangles blocking the anchors of the most optimal
placements.
placement: Placement to be optimized.
rect_width,rect_height: width,height of the rectangle to be optimized.
"""
rect = [placement.x, placement.y, rect_width, rect_height]
# Try to move the rectangle to the left as far as possible
left_most = placement.x
while self._is_free(rect, self._max_packing_area_width, self._max_packing_area_height):
left_most = rect[0]
            rect[0] -= 1  # one step at a time looks slow; a faster search is possible
# Reset rectangle to original position
rect[0] = placement.x
# Try to move the rectangle upwards as far as possible
top_most = placement.y
while self._is_free(rect, self._max_packing_area_width, self._max_packing_area_height):
top_most = rect[1]
            rect[1] -= 1  # one step at a time looks slow; a faster search is possible
# Use the dimension in which the rectangle could be moved farther
if placement.x - left_most > placement.y - top_most:
placement.x = left_most
else:
placement.y = top_most
def _select_anchor(self, rect_width, rect_height, total_packing_area_width,
total_packing_area_height):
"""
Searches for a free anchor and recursively enlarges the packing area
if none can be found.
rect_width,rect_height: width,height of the rectangle to be placed.
total_packing_area_width,total_packing_area_height: width,height of the tested packing area.
        Return: index of the anchor the rectangle is to be placed at, or -1 if the rectangle
does not fit in the packing area anymore.
"""
        # Try to locate an anchor point where the rectangle fits in
free_anchor_idx = self._find_anchor(rect_width, rect_height,
total_packing_area_width, total_packing_area_height)
        # If the rectangle fits without resizing the packing area (any further in case
# of a recursive call), take over the new packing area size and return the
# anchor at which the rectangle can be placed.
if free_anchor_idx != -1:
self._actual_packing_area_width = total_packing_area_width
self._actual_packing_area_height = total_packing_area_height
return free_anchor_idx
        # If we reach this point, the rectangle did not fit in the current packing
# area and our only choice is to try and enlarge the packing area.
# For readability, determine whether the packing area can be enlarged
# any further in its width and in its height
can_enlarge_width = total_packing_area_width < self._max_packing_area_width
can_enlarge_height = total_packing_area_height < self._max_packing_area_height
should_enlarge_height = (not can_enlarge_width) or (total_packing_area_height <
total_packing_area_width)
# Try to enlarge the smaller of the two dimensions first (unless the smaller
# dimension is already at its maximum size). 'shouldEnlargeHeight' is True
# when the height was the smaller dimension or when the width is maxed out.
if can_enlarge_height and should_enlarge_height:
# Try to double the height of the packing area
return self._select_anchor(rect_width, rect_height, total_packing_area_width,
min(total_packing_area_height * 2,
self._max_packing_area_height))
elif can_enlarge_width:
# Try to double the width of the packing area
return self._select_anchor(rect_width, rect_height,
min(total_packing_area_width * 2,
self._max_packing_area_width),
total_packing_area_height)
else:
# Both dimensions are at their maximum sizes and the rectangle still
# didn't fit. We give up!
return -1
    def _find_anchor(self, rect_width, rect_height, total_packing_area_width,
                     total_packing_area_height):
"""
Locates the first free anchor at which the rectangle fits.
rect_width,rect_height: width,height of the rectangle to be placed.
        total_packing_area_width,total_packing_area_height: total width,height of the packing area.
returns: the index of the first free anchor or -1 if none is found.
"""
possible_pos = [0, 0, rect_width, rect_height]
# Walk over all anchors (which are ordered by their distance to the
# upper left corner of the packing area) until one is discovered that
# can house the new rectangle.
self_anchors = self._anchors
self_is_free = self._is_free
for i in range(len(self_anchors)): # low level loop for Psyco
possible_pos[0] = self_anchors[i].x
possible_pos[1] = self_anchors[i].y
            # See if the rectangle would fit in at this anchor point
            if self_is_free(possible_pos, total_packing_area_width, total_packing_area_height):
return i
# No anchor points were found where the rectangle would fit in
return -1
def _is_free(self, rect, total_packing_area_width, total_packing_area_height):
"""
Determines whether the rectangle can be placed in the packing area
at its current location.
rect: Rectangle whose position to check.
total_packing_area_width,total_packing_area_height: total width,height of the packing area.
returns: True if the rectangle can be placed at its current position.
"""
# If the rectangle is partially or completely outside of the packing
# area, it can't be placed at its current location
if (rect[0] < 0) or (rect[1] < 0) or \
((rect[0] + rect[2]) > total_packing_area_width) or \
((rect[1] + rect[3]) > total_packing_area_height):
return False
# Brute-force search whether the rectangle touches any of the other
# rectangles already in the packing area
# return not any(r.intersects(rect) for r in self._rectangles) #slower
# quick test, this eliminates 99%+ rects
self_bitmatrix = self._bitmatrix
pos = rect[0] + rect[1] * self._max_packing_area_width
if self_bitmatrix[pos] or self_bitmatrix[pos + rect[2] - 1] or \
self_bitmatrix[rect[0] + (rect[1] + rect[3] - 1) * self._max_packing_area_width] \
or \
self_bitmatrix[rect[0] + rect[2] - 1 + (rect[1] + rect[3] - 1) *
self._max_packing_area_width]:
return False
# full test
shift = rect[1] * self._max_packing_area_width
for y in range(rect[3]):
start_line_pos = rect[0] + shift
shift += self._max_packing_area_width
for i in range(start_line_pos, rect[2] + start_line_pos):
if self_bitmatrix[i]:
return False
# Success! The rectangle is inside the packing area and doesn't overlap
# with any other rectangles that have already been packed.
        return True
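# --- Illustrative usage sketch; not part of the original file ---
# Pack a few arbitrary demo rectangles into a 256x256 area and report where
# each one was placed (or that it did not fit).
if __name__ == "__main__":
    packer = RectanglePacker(256, 256)
    for w, h in [(100, 60), (60, 100), (80, 80), (30, 30)]:
        spot = packer.pack(w, h)
        if spot is None:
            print("no room for a %dx%d rectangle" % (w, h))
        else:
            print("%dx%d placed at (%d, %d)" % (w, h, spot.x, spot.y))
|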
py | b41260c45bd637f623ddd67cb75793f159fb9b81 | # coding:UTF-8
# Copyright 2017 The Xiaoyu Fang. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy
import talib
class ChartFeature(object):
def __init__(self, selector):
self.selector = selector
self.supported = {"ROCP", "OROCP", "HROCP", "LROCP", "MACD", "RSI", "VROCP", "BOLL", "MA", "VMA", "PRICE_VOLUME"}
self.feature = []
def moving_extract(self, window=30, open_prices=None, close_prices=None, high_prices=None, low_prices=None,
volumes=None, with_label=True, flatten=True):
self.extract(open_prices=open_prices, close_prices=close_prices, high_prices=high_prices, low_prices=low_prices,
volumes=volumes)
feature_arr = numpy.asarray(self.feature)
p = 0
# rows = feature_arr.shape[0]
# print("feature dimension: %s" % rows)
if with_label:
moving_features = []
moving_labels = []
while p + window <= feature_arr.shape[1]:
x = feature_arr[:, p:p + window]
# y = cmp(close_prices[p + window], close_prices[p + window - 1]) + 1
if p + window < feature_arr.shape[1]:
p_change = (close_prices[p + window] - close_prices[p + window - 1]) / close_prices[p + window - 1]
else:
p_change = 0
# use percent of change as label
y = p_change
if flatten:
x = x.flatten("F")
moving_features.append(numpy.nan_to_num(x))
moving_labels.append(y)
p += 1
return numpy.asarray(moving_features), numpy.asarray(moving_labels)
else:
moving_features = []
while p + window <= feature_arr.shape[1]:
x = feature_arr[:, p:p + window]
if flatten:
x = x.flatten("F")
moving_features.append(numpy.nan_to_num(x))
p += 1
return moving_features
def extract(self, open_prices=None, close_prices=None, high_prices=None, low_prices=None, volumes=None):
self.feature = []
for feature_type in self.selector:
if feature_type in self.supported:
# print("extracting feature : %s" % feature_type)
self.extract_by_type(feature_type, open_prices=open_prices, close_prices=close_prices,
high_prices=high_prices, low_prices=low_prices, volumes=volumes)
else:
print("feature type not supported: %s" % feature_type)
# self.feature_distribution()
return self.feature
def feature_distribution(self):
k = 0
for feature_column in self.feature:
fc = numpy.nan_to_num(feature_column)
mean = numpy.mean(fc)
var = numpy.var(fc)
max_value = numpy.max(fc)
min_value = numpy.min(fc)
print("[%s_th feature] mean: %s, var: %s, max: %s, min: %s" % (k, mean, var, max_value, min_value))
k = k + 1
def extract_by_type(self, feature_type, open_prices=None, close_prices=None, high_prices=None, low_prices=None,
volumes=None):
if feature_type == 'ROCP':
rocp = talib.ROCP(close_prices, timeperiod=1)
self.feature.append(rocp)
if feature_type == 'OROCP':
orocp = talib.ROCP(open_prices, timeperiod=1)
self.feature.append(orocp)
if feature_type == 'HROCP':
hrocp = talib.ROCP(high_prices, timeperiod=1)
self.feature.append(hrocp)
if feature_type == 'LROCP':
lrocp = talib.ROCP(low_prices, timeperiod=1)
self.feature.append(lrocp)
if feature_type == 'MACD':
macd, signal, hist = talib.MACD(close_prices, fastperiod=12, slowperiod=26, signalperiod=9)
norm_signal = numpy.minimum(numpy.maximum(numpy.nan_to_num(signal), -1), 1)
norm_hist = numpy.minimum(numpy.maximum(numpy.nan_to_num(hist), -1), 1)
norm_macd = numpy.minimum(numpy.maximum(numpy.nan_to_num(macd), -1), 1)
zero = numpy.asarray([0])
macdrocp = numpy.minimum(numpy.maximum(numpy.concatenate((zero, numpy.diff(numpy.nan_to_num(macd)))), -1), 1)
signalrocp = numpy.minimum(numpy.maximum(numpy.concatenate((zero, numpy.diff(numpy.nan_to_num(signal)))), -1), 1)
histrocp = numpy.minimum(numpy.maximum(numpy.concatenate((zero, numpy.diff(numpy.nan_to_num(hist)))), -1), 1)
self.feature.append(norm_macd)
self.feature.append(norm_signal)
self.feature.append(norm_hist)
self.feature.append(macdrocp)
self.feature.append(signalrocp)
self.feature.append(histrocp)
if feature_type == 'RSI':
rsi6 = talib.RSI(close_prices, timeperiod=6)
rsi12 = talib.RSI(close_prices, timeperiod=12)
rsi24 = talib.RSI(close_prices, timeperiod=24)
rsi6rocp = talib.ROCP(rsi6 + 100., timeperiod=1)
rsi12rocp = talib.ROCP(rsi12 + 100., timeperiod=1)
rsi24rocp = talib.ROCP(rsi24 + 100., timeperiod=1)
self.feature.append(rsi6 / 100.0 - 0.5)
self.feature.append(rsi12 / 100.0 - 0.5)
self.feature.append(rsi24 / 100.0 - 0.5)
# self.feature.append(numpy.maximum(rsi6 / 100.0 - 0.8, 0))
# self.feature.append(numpy.maximum(rsi12 / 100.0 - 0.8, 0))
# self.feature.append(numpy.maximum(rsi24 / 100.0 - 0.8, 0))
# self.feature.append(numpy.minimum(rsi6 / 100.0 - 0.2, 0))
# self.feature.append(numpy.minimum(rsi6 / 100.0 - 0.2, 0))
# self.feature.append(numpy.minimum(rsi6 / 100.0 - 0.2, 0))
# self.feature.append(numpy.maximum(numpy.minimum(rsi6 / 100.0 - 0.5, 0.3), -0.3))
# self.feature.append(numpy.maximum(numpy.minimum(rsi6 / 100.0 - 0.5, 0.3), -0.3))
# self.feature.append(numpy.maximum(numpy.minimum(rsi6 / 100.0 - 0.5, 0.3), -0.3))
self.feature.append(rsi6rocp)
self.feature.append(rsi12rocp)
self.feature.append(rsi24rocp)
if feature_type == 'VROCP':
vrocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(numpy.maximum(volumes, 1), timeperiod=1)))
# norm_volumes = (volumes - numpy.mean(volumes)) / math.sqrt(numpy.var(volumes))
# vrocp = talib.ROCP(norm_volumes + numpy.max(norm_volumes) - numpy.min(norm_volumes), timeperiod=1)
# self.feature.append(norm_volumes)
self.feature.append(vrocp)
if feature_type == 'BOLL':
upperband, middleband, lowerband = talib.BBANDS(close_prices, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)
self.feature.append((upperband - close_prices) / close_prices)
self.feature.append((middleband - close_prices) / close_prices)
self.feature.append((lowerband - close_prices) / close_prices)
if feature_type == 'MA':
ma5 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=5))
ma10 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=10))
ma20 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=20))
ma30 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=30))
ma60 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=60))
ma90 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=90))
ma120 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=120))
ma180 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=180))
ma360 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=360))
ma720 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=720))
ma5rocp = talib.ROCP(ma5, timeperiod=1)
ma10rocp = talib.ROCP(ma10, timeperiod=1)
ma20rocp = talib.ROCP(ma20, timeperiod=1)
ma30rocp = talib.ROCP(ma30, timeperiod=1)
ma60rocp = talib.ROCP(ma60, timeperiod=1)
ma90rocp = talib.ROCP(ma90, timeperiod=1)
ma120rocp = talib.ROCP(ma120, timeperiod=1)
ma180rocp = talib.ROCP(ma180, timeperiod=1)
ma360rocp = talib.ROCP(ma360, timeperiod=1)
ma720rocp = talib.ROCP(ma720, timeperiod=1)
self.feature.append(ma5rocp)
self.feature.append(ma10rocp)
self.feature.append(ma20rocp)
self.feature.append(ma30rocp)
self.feature.append(ma60rocp)
self.feature.append(ma90rocp)
self.feature.append(ma120rocp)
self.feature.append(ma180rocp)
self.feature.append(ma360rocp)
self.feature.append(ma720rocp)
self.feature.append((ma5 - close_prices) / close_prices)
self.feature.append((ma10 - close_prices) / close_prices)
self.feature.append((ma20 - close_prices) / close_prices)
self.feature.append((ma30 - close_prices) / close_prices)
self.feature.append((ma60 - close_prices) / close_prices)
self.feature.append((ma90 - close_prices) / close_prices)
self.feature.append((ma120 - close_prices) / close_prices)
self.feature.append((ma180 - close_prices) / close_prices)
self.feature.append((ma360 - close_prices) / close_prices)
self.feature.append((ma720 - close_prices) / close_prices)
if feature_type == 'VMA':
ma5 = talib.MA(volumes, timeperiod=5)
ma10 = talib.MA(volumes, timeperiod=10)
ma20 = talib.MA(volumes, timeperiod=20)
ma30 = talib.MA(volumes, timeperiod=30)
ma60 = talib.MA(volumes, timeperiod=60)
ma90 = talib.MA(volumes, timeperiod=90)
ma120 = talib.MA(volumes, timeperiod=120)
ma180 = talib.MA(volumes, timeperiod=180)
ma360 = talib.MA(volumes, timeperiod=360)
ma720 = talib.MA(volumes, timeperiod=720)
ma5rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma5, timeperiod=1)))
ma10rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma10, timeperiod=1)))
ma20rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma20, timeperiod=1)))
ma30rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma30, timeperiod=1)))
ma60rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma60, timeperiod=1)))
ma90rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma90, timeperiod=1)))
ma120rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma120, timeperiod=1)))
ma180rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma180, timeperiod=1)))
ma360rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma360, timeperiod=1)))
ma720rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma720, timeperiod=1)))
self.feature.append(ma5rocp)
self.feature.append(ma10rocp)
self.feature.append(ma20rocp)
self.feature.append(ma30rocp)
self.feature.append(ma60rocp)
self.feature.append(ma90rocp)
self.feature.append(ma120rocp)
self.feature.append(ma180rocp)
self.feature.append(ma360rocp)
self.feature.append(ma720rocp)
self.feature.append(numpy.arctan(numpy.nan_to_num((ma5 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma10 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma20 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma30 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma60 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma90 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma120 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma180 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma360 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma720 - volumes) / (volumes + 1))))
if feature_type == 'PRICE_VOLUME':
rocp = talib.ROCP(close_prices, timeperiod=1)
# norm_volumes = (volumes - numpy.mean(volumes)) / math.sqrt(numpy.var(volumes))
# vrocp = talib.ROCP(norm_volumes + numpy.max(norm_volumes) - numpy.min(norm_volumes), timeperiod=1)
vrocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(numpy.maximum(volumes, 1), timeperiod=1)))
pv = rocp * vrocp
self.feature.append(pv)
def extract_feature(raw_data, selector, window=30, with_label=True, flatten=True):
chart_feature = ChartFeature(selector)
closes = raw_data.close.values
opens = raw_data.open.values
highs = raw_data.high.values
lows = raw_data.low.values
volumes = raw_data.volume.values
if with_label:
moving_features, moving_labels = chart_feature.moving_extract(window=window, open_prices=opens,
close_prices=closes,
high_prices=highs, low_prices=lows,
volumes=volumes, with_label=with_label,
flatten=flatten)
return moving_features, moving_labels
else:
moving_features = chart_feature.moving_extract(window=window, open_prices=opens, close_prices=closes,
high_prices=highs, low_prices=lows, volumes=volumes,
with_label=with_label, flatten=flatten)
return moving_features
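# --- Illustrative usage sketch; not part of the original file ---
# Assumes pandas and TA-Lib are installed; the synthetic DataFrame below just
# provides the open/high/low/close/volume columns extract_feature expects.
if __name__ == '__main__':
    import pandas as pd
    n = 800
    rng = numpy.random.RandomState(0)
    closes = 100 + numpy.cumsum(rng.randn(n))
    demo = pd.DataFrame({'open': closes + rng.randn(n) * 0.1,
                         'high': closes + 1.0,
                         'low': closes - 1.0,
                         'close': closes,
                         'volume': rng.randint(1000, 5000, n).astype(float)})
    feats, labels = extract_feature(demo, ['ROCP', 'MACD', 'RSI'], window=30)
    print(feats.shape, labels.shape)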
|
py | b41261c18b3e9b2a1c1bdfb92065c105955248c0 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cfitsio(AutotoolsPackage):
"""CFITSIO is a library of C and Fortran subroutines for reading and writing
data files in FITS (Flexible Image Transport System) data format.
"""
homepage = 'http://heasarc.gsfc.nasa.gov/fitsio/'
url = 'http://heasarc.gsfc.nasa.gov/FTP/software/fitsio/c/cfitsio3450.tar.gz'
version('3.450', sha256='bf6012dbe668ecb22c399c4b7b2814557ee282c74a7d5dc704eb17c30d9fb92e')
version('3.420', sha256='6c10aa636118fa12d9a5e2e66f22c6436fb358da2af6dbf7e133c142e2ac16b8')
version('3.410', sha256='a556ac7ea1965545dcb4d41cfef8e4915eeb8c0faa1b52f7ff70870f8bb5734c')
version('3.370', sha256='092897c6dae4dfe42d91d35a738e45e8236aa3d8f9b3ffc7f0e6545b8319c63a')
variant('bzip2', default=True, description='Enable bzip2 support')
variant('shared', default=True, description='Build shared libraries')
depends_on('curl')
depends_on('bzip2', when='+bzip2')
def url_for_version(self, version):
url = 'http://heasarc.gsfc.nasa.gov/FTP/software/fitsio/c/cfitsio{0}.tar.gz'
return url.format(version.joined)
def configure_args(self):
spec = self.spec
extra_args = []
if '+bzip2' in spec:
            extra_args.append('--with-bzip2=%s' % spec['bzip2'].prefix)
return extra_args
@property
def build_targets(self):
targets = ['all']
# Build shared if variant is set.
if '+shared' in self.spec:
targets += ['shared']
return targets
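    # Example (illustrative): with the variants above a user could run
    #   spack install cfitsio@3.450 +shared ~bzip2
    # to build shared libraries while disabling bzip2 support.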
|
py | b41262026ea5380b81eff880260972513954a774 | text = input()
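# Stack of the indexes of '(' characters that are still unmatched; popping
# on each ')' prints every parenthesized substring, innermost first.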
parentheses = []
for idx in range(len(text)):
if text[idx] == "(":
parentheses.append(idx)
elif text[idx] == ")":
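        # the most recently opened '(' matches this ')'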
start_idx = parentheses.pop()
print(text[start_idx: idx + 1]) |
py | b412626ebaa4046942d2fc1e4ecf309e6819aebc | import numpy as np
import torch
import os
import argparse
import re
from tacotron2 import Tacotron2
from wavernn import WaveRNN
from utils.text.symbols import symbols
from utils.paths import Paths
from utils.text import text_to_sequence
from utils.display import save_attention, simple_table
from utils.dsp import reconstruct_waveform, save_wav
from utils import hparams as hp
#import hparams as hp
class TaiwaneseTacotron():
def __init__(self, args):
self.args = args
#================ vocoder ================#
if not (self.args.vocoder == "wavernn" or self.args.vocoder == "griffinlim"):
            # argparse.ArgumentError also requires the offending argument object,
            # so a plain ValueError is raised here instead.
            raise ValueError('Must provide a valid vocoder type!')
hp.configure(self.args.hp_file) # Load hparams from file
# set defaults for any arguments that depend on hparams
if self.args.vocoder == 'wavernn':
if self.args.target is None:
self.args.target = hp.voc_target
if self.args.overlap is None:
self.args.overlap = hp.voc_overlap
if self.args.batched is None:
self.args.batched = hp.voc_gen_batched
#================ others ================#
# self.paths = Paths("", hp.voc_model_id, hp.tts_model_id, output_stage=True)
        if not self.args.force_cpu and torch.cuda.is_available():
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
        self.device = device  # reused at inference time in gen_tacotron2
        print('Using device:', device)
# === Wavernn === #
if self.args.vocoder == 'wavernn':
print('\nInitialising WaveRNN Model...\n')
self.voc_model = WaveRNN(rnn_dims=hp.voc_rnn_dims,
fc_dims=hp.voc_fc_dims,
bits=hp.bits,
pad=hp.voc_pad,
upsample_factors=hp.voc_upsample_factors,
feat_dims=hp.num_mels,
compute_dims=hp.voc_compute_dims,
res_out_dims=hp.voc_res_out_dims,
res_blocks=hp.voc_res_blocks,
hop_length=hp.hop_length,
sample_rate=hp.sample_rate,
mode=hp.voc_mode).to(device)
# voc_load_path = self.args.voc_weights if self.args.voc_weights else self.paths.voc_latest_weights
voc_load_path = self.args.voc_weights
self.voc_model.load(voc_load_path)
# === Tacotron2 === #
print('\nInitializing Tacotron2 Model...\n')
self.tts_model = Tacotron2().to(device)
# tts_load_path = self.args.tts_weights if self.args.tts_weights else self.paths.tts_latest_weights
tts_load_path = self.args.tts_weights
self.tts_model.load(tts_load_path)
# === Infomation === #
if self.args.vocoder == 'wavernn':
self.voc_k = self.voc_model.get_step() // 1000
self.tts_k = self.tts_model.get_step() // 1000
simple_table([('Tacotron2', str(self.tts_k) + 'k'),
('Vocoder Type', 'WaveRNN'),
('WaveRNN', str(self.voc_k) + 'k'),
('Generation Mode',
'Batched' if self.args.batched else 'Unbatched'),
('Target Samples',
self.args.target if self.args.batched else 'N/A'),
('Overlap Samples', self.args.overlap if self.args.batched else 'N/A')])
elif self.args.vocoder == 'griffinlim':
self.tts_k = self.tts_model.get_step() // 1000
simple_table([('Tacotron2', str(self.tts_k) + 'k'),
('Vocoder Type', 'Griffin-Lim'),
('GL Iters', self.args.iters)])
def gen_tacotron2(self, inputs):
for i, x in enumerate(inputs, 1):
print(f'\n| Generating {i}/{len(inputs)}')
print(x)
x = np.array(x)[None, :]
            # torch.autograd.Variable is deprecated; move the input onto the
            # device selected in __init__ rather than unconditionally calling .cuda().
            x = torch.from_numpy(x).long().to(self.device)
self.tts_model.eval()
_, mel_outputs_postnet, _, _ = self.tts_model.inference(x)
if mel_outputs_postnet.shape[2] > 2000:
print(mel_outputs_postnet.shape)
# too long, not successful
return False
if self.args.vocoder == 'griffinlim':
v_type = self.args.vocoder
elif self.args.vocoder == 'wavernn' and self.args.batched:
v_type = 'wavernn_batched'
else:
v_type = 'wavernn_unbatched'
# === output === #
# if not self.args.save_dir:
# save_path = self.paths.tts_output / \
# f'{i}_{v_type}_{self.tts_k}k.wav'
# else:
os.makedirs(self.args.save_dir, exist_ok=True)
save_path = os.path.join(
self.args.save_dir, f'{i}_{v_type}_{self.tts_k}k.wav')
if self.args.vocoder == 'wavernn':
m = mel_outputs_postnet
wav = self.voc_model.generate(
m, self.args.batched, hp.voc_target, hp.voc_overlap, hp.mu_law)
save_wav(wav, save_path)
elif self.args.vocoder == 'griffinlim':
m = torch.squeeze(mel_outputs_postnet).detach().cpu().numpy()
wav = reconstruct_waveform(m, n_iter=self.args.iters)
save_wav(wav, save_path)
# return True
def generate(self, input_text=None, file=None):
# generate wavs from a given file
if file is not None:
with open(file) as f:
inputs = [text_to_sequence(
l.strip(), hp.text_cleaners) for l in f]
else:
inputs = [text_to_sequence(input_text.strip(), ['basic_cleaners'])]
self.gen_tacotron2(inputs)
# below is for "Zenbo demo"
# generate one wav from a given text input
# else:
# inputs = [text_to_sequence(input_text.strip(), ['basic_cleaners'])]
# success = self.gen_tacotron2(inputs)
# if not success:
# print("TOO LONG!!!")
# _input = [text_to_sequence(
# 'sit8 le1 tsit8 ku2 gua1 be3 hiau1 koŋ2 .', ['basic_cleaners'])]
# self.gen_tacotron2(_input)
print('\n\nDone.\n')
if __name__ == '__main__':
# Parse Arguments
parser = argparse.ArgumentParser(description='TTS')
parser.add_argument('--tts_weights', type=str,
help='[string/path] Load in different Tacotron weights', default=None)
parser.add_argument('--voc_weights', type=str,
help='[string/path] Load in different WaveRNN weights', default=None)
parser.add_argument('--save_dir', type=str, default=None)
args = parser.parse_args()
args.vocoder = 'wavernn'
args.hp_file = 'hparams.py'
args.save_attn = False
args.batched = True
args.target = None
args.overlap = None
args.force_cpu = False
TTS = TaiwaneseTacotron(args)
TTS.generate(file="sentences.txt")
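# Hedged invocation sketch (the script name and checkpoint paths below are
# assumptions, not taken from this repository):
#   python synthesize.py --tts_weights ckpts/tts_latest.pyt \
#       --voc_weights ckpts/voc_latest.pyt --save_dir out/
# One wav per line of sentences.txt is written to --save_dir.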
|
py | b41263006d1ad0c108a82163a9b21109e49a893e | """Implementations of custom flag types for absl.flags."""
import pathlib
from absl import flags as absl_flags
from typing import Callable
class PathParser(absl_flags.ArgumentParser):
"""Parser of path values."""
def __init__(self,
must_exist: bool = True,
exist_ok: bool = True,
is_dir: bool = False):
"""Create a path values parser.
Args:
          must_exist: If true, the path must exist.
          exist_ok: If false, the path must not exist. Has no effect when true;
            implied true if must_exist.
is_dir: If true, the path must be a directory.
"""
self.must_exist = must_exist
self.exist_ok = exist_ok
self.is_dir = is_dir
def parse(self, argument) -> pathlib.Path:
"""See base class."""
val = self.convert(argument)
if self.must_exist:
if not val.exists():
raise ValueError('not found')
if self.is_dir and not val.is_dir():
raise ValueError('not a directory')
elif not self.is_dir and not val.is_file():
raise ValueError('not a file')
elif not self.exist_ok and val.exists():
raise ValueError('already exists')
return val
def convert(self, argument: str) -> pathlib.Path:
"""Returns the value of this argument."""
if not argument:
raise TypeError('Path flag must be set')
return pathlib.Path(argument)
class DatabaseParser(absl_flags.ArgumentParser):
"""Parser of path values."""
def __init__(self, database_class, must_exist: bool = True):
"""Create a path values parser.
Args:
must_exist: If true, the database must exist. Else, it is created.
"""
# TODO(cec): Raise TypeError if database_class is not a subclass of
# 'sqlutil.Database'.
self.database_class = database_class
self.must_exist = must_exist
def parse(self, argument) -> 'sqlutil.Database':
"""See base class."""
return self.convert(argument)
def convert(self, argument: str) -> Callable[[], 'sqlutil.Database']:
"""Returns the value of this argument."""
if not argument:
raise TypeError('Path flag must be set')
return lambda: self.database_class(url=argument, must_exist=self.must_exist)
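# Hedged usage sketch (illustration only; the parser can be exercised directly,
# without registering an absl flag):
if __name__ == '__main__':
    _parser = PathParser(must_exist=False)
    print(_parser.parse('/tmp/example.txt'))  # a pathlib.Path; existence not checked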
|
py | b412638bde58fedd70debf37e253e4a7f7bd0820 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from torch import nn
from torchvision.ops import boxes as box_ops
from . import _utils as det_utils
class AnchorGenerator(nn.Module):
"""
Module that generates anchors for a set of feature maps and
image sizes.
    The module supports computing anchors at multiple sizes and aspect ratios
    per feature map.
    sizes and aspect_ratios should have the same number of elements, and they
    should correspond to the number of feature maps.
sizes[i] and aspect_ratios[i] can have an arbitrary number of elements,
and AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors
per spatial location for feature map i.
Arguments:
sizes (Tuple[Tuple[int]]):
aspect_ratios (Tuple[Tuple[float]]):
"""
def __init__(
self,
sizes=(128, 256, 512),
aspect_ratios=(0.5, 1.0, 2.0),
):
super(AnchorGenerator, self).__init__()
if not isinstance(sizes[0], (list, tuple)):
# TODO change this
sizes = tuple((s,) for s in sizes)
if not isinstance(aspect_ratios[0], (list, tuple)):
aspect_ratios = (aspect_ratios,) * len(sizes)
assert len(sizes) == len(aspect_ratios)
self.sizes = sizes
self.aspect_ratios = aspect_ratios
self.cell_anchors = None
self._cache = {}
@staticmethod
def generate_anchors(scales, aspect_ratios, device="cpu"):
scales = torch.as_tensor(scales, dtype=torch.float32, device=device)
aspect_ratios = torch.as_tensor(aspect_ratios, dtype=torch.float32, device=device)
h_ratios = torch.sqrt(aspect_ratios)
w_ratios = 1 / h_ratios
ws = (w_ratios[:, None] * scales[None, :]).view(-1)
hs = (h_ratios[:, None] * scales[None, :]).view(-1)
base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2
return base_anchors.round()
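    # Worked example: generate_anchors((128,), (1.0,)) returns the single
    # centered box [-64., -64., 64., 64.], since ws = hs = 128 and stacking
    # [-ws, -hs, ws, hs] / 2 gives half-extents of 64 around the origin.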
def set_cell_anchors(self, device):
if self.cell_anchors is not None:
return self.cell_anchors
cell_anchors = [
self.generate_anchors(
sizes,
aspect_ratios,
device
)
for sizes, aspect_ratios in zip(self.sizes, self.aspect_ratios)
]
self.cell_anchors = cell_anchors
def num_anchors_per_location(self):
return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)]
def grid_anchors(self, grid_sizes, strides):
anchors = []
for size, stride, base_anchors in zip(
grid_sizes, strides, self.cell_anchors
):
grid_height, grid_width = size
stride_height, stride_width = stride
device = base_anchors.device
shifts_x = torch.arange(
0, grid_width, dtype=torch.float32, device=device
) * stride_width
shifts_y = torch.arange(
0, grid_height, dtype=torch.float32, device=device
) * stride_height
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
anchors.append(
(shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)
)
return anchors
def cached_grid_anchors(self, grid_sizes, strides):
key = tuple(grid_sizes) + tuple(strides)
if key in self._cache:
return self._cache[key]
anchors = self.grid_anchors(grid_sizes, strides)
self._cache[key] = anchors
return anchors
def forward(self, image_list, feature_maps):
grid_sizes = tuple([feature_map.shape[-2:] for feature_map in feature_maps])
image_size = image_list.tensors.shape[-2:]
strides = tuple((image_size[0] / g[0], image_size[1] / g[1]) for g in grid_sizes)
self.set_cell_anchors(feature_maps[0].device)
anchors_over_all_feature_maps = self.cached_grid_anchors(grid_sizes, strides)
anchors = []
for i, (image_height, image_width) in enumerate(image_list.image_sizes):
anchors_in_image = []
for anchors_per_feature_map in anchors_over_all_feature_maps:
anchors_in_image.append(anchors_per_feature_map)
anchors.append(anchors_in_image)
anchors = [torch.cat(anchors_per_image) for anchors_per_image in anchors]
return anchors
class RPNHead(nn.Module):
"""
Adds a simple RPN Head with classification and regression heads
Arguments:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
"""
def __init__(self, in_channels, num_anchors):
super(RPNHead, self).__init__()
self.conv = nn.Conv2d(
in_channels, in_channels, kernel_size=3, stride=1, padding=1
)
self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
self.bbox_pred = nn.Conv2d(
in_channels, num_anchors * 4, kernel_size=1, stride=1
)
for l in self.children():
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
def forward(self, x):
logits = []
bbox_reg = []
for feature in x:
t = F.relu(self.conv(feature))
logits.append(self.cls_logits(t))
bbox_reg.append(self.bbox_pred(t))
return logits, bbox_reg
def permute_and_flatten(layer, N, A, C, H, W):
    # (N, A*C, H, W) -> (N, H*W*A, C): move the per-anchor channel dimension
    # last so predictions line up with the flattened label layout.
    layer = layer.view(N, -1, C, H, W)
    layer = layer.permute(0, 3, 4, 1, 2)
    layer = layer.reshape(N, -1, C)
    return layer
def concat_box_prediction_layers(box_cls, box_regression):
box_cls_flattened = []
box_regression_flattened = []
# for each feature level, permute the outputs to make them be in the
# same format as the labels. Note that the labels are computed for
# all feature levels concatenated, so we keep the same representation
# for the objectness and the box_regression
for box_cls_per_level, box_regression_per_level in zip(
box_cls, box_regression
):
N, AxC, H, W = box_cls_per_level.shape
Ax4 = box_regression_per_level.shape[1]
A = Ax4 // 4
C = AxC // A
box_cls_per_level = permute_and_flatten(
box_cls_per_level, N, A, C, H, W
)
box_cls_flattened.append(box_cls_per_level)
box_regression_per_level = permute_and_flatten(
box_regression_per_level, N, A, 4, H, W
)
box_regression_flattened.append(box_regression_per_level)
# concatenate on the first dimension (representing the feature levels), to
# take into account the way the labels were generated (with all feature maps
# being concatenated as well)
box_cls = torch.cat(box_cls_flattened, dim=1).reshape(-1, C)
box_regression = torch.cat(box_regression_flattened, dim=1).reshape(-1, 4)
return box_cls, box_regression
class RegionProposalNetwork(torch.nn.Module):
"""
Implements Region Proposal Network (RPN).
Arguments:
anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
maps.
head (nn.Module): module that computes the objectness and regression deltas
fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
considered as positive during training of the RPN.
bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
considered as negative during training of the RPN.
batch_size_per_image (int): number of anchors that are sampled during training of the RPN
for computing the loss
positive_fraction (float): proportion of positive anchors in a mini-batch during training
of the RPN
pre_nms_top_n (Dict[int]): number of proposals to keep before applying NMS. It should
contain two fields: training and testing, to allow for different values depending
on training or evaluation
post_nms_top_n (Dict[int]): number of proposals to keep after applying NMS. It should
contain two fields: training and testing, to allow for different values depending
on training or evaluation
nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
"""
def __init__(self,
anchor_generator,
head,
#
fg_iou_thresh, bg_iou_thresh,
batch_size_per_image, positive_fraction,
#
pre_nms_top_n, post_nms_top_n, nms_thresh):
super(RegionProposalNetwork, self).__init__()
self.anchor_generator = anchor_generator
self.head = head
self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
# used during training
self.box_similarity = box_ops.box_iou
self.proposal_matcher = det_utils.Matcher(
fg_iou_thresh,
bg_iou_thresh,
allow_low_quality_matches=True,
)
self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(
batch_size_per_image, positive_fraction
)
# used during testing
self._pre_nms_top_n = pre_nms_top_n
self._post_nms_top_n = post_nms_top_n
self.nms_thresh = nms_thresh
self.min_size = 1e-3
@property
def pre_nms_top_n(self):
if self.training:
return self._pre_nms_top_n['training']
return self._pre_nms_top_n['testing']
@property
def post_nms_top_n(self):
if self.training:
return self._post_nms_top_n['training']
return self._post_nms_top_n['testing']
def assign_targets_to_anchors(self, anchors, targets):
labels = []
matched_gt_boxes = []
for anchors_per_image, targets_per_image in zip(anchors, targets):
gt_boxes = targets_per_image["boxes"]
match_quality_matrix = self.box_similarity(gt_boxes, anchors_per_image)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# get the targets corresponding GT for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
if gt_boxes.shape[0] == 0:
matched_gt_boxes_per_image = gt_boxes
else:
matched_gt_boxes_per_image = gt_boxes[matched_idxs.clamp(min=0)]
labels_per_image = matched_idxs >= 0
labels_per_image = labels_per_image.to(dtype=torch.float32)
# Background (negative examples)
bg_indices = matched_idxs == self.proposal_matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_indices] = 0
# discard indices that are between thresholds
inds_to_discard = matched_idxs == self.proposal_matcher.BETWEEN_THRESHOLDS
labels_per_image[inds_to_discard] = -1
labels.append(labels_per_image)
matched_gt_boxes.append(matched_gt_boxes_per_image)
return labels, matched_gt_boxes
def _get_top_n_idx(self, objectness, num_anchors_per_level):
r = []
offset = 0
for ob in objectness.split(num_anchors_per_level, 1):
num_anchors = ob.shape[1]
pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
_, top_n_idx = ob.topk(pre_nms_top_n, dim=1)
r.append(top_n_idx + offset)
offset += num_anchors
return torch.cat(r, dim=1)
def filter_proposals(self, proposals, objectness, image_shapes, num_anchors_per_level):
num_images = proposals.shape[0]
device = proposals.device
        # do not backprop through objectness
objectness = objectness.detach()
objectness = objectness.reshape(num_images, -1)
levels = [
torch.full((n,), idx, dtype=torch.int64, device=device)
for idx, n in enumerate(num_anchors_per_level)
]
levels = torch.cat(levels, 0)
levels = levels.reshape(1, -1).expand_as(objectness)
# select top_n boxes independently per level before applying nms
top_n_idx = self._get_top_n_idx(objectness, num_anchors_per_level)
batch_idx = torch.arange(num_images, device=device)[:, None]
objectness = objectness[batch_idx, top_n_idx]
levels = levels[batch_idx, top_n_idx]
proposals = proposals[batch_idx, top_n_idx]
final_boxes = []
final_scores = []
for boxes, scores, lvl, img_shape in zip(proposals, objectness, levels, image_shapes):
boxes = box_ops.clip_boxes_to_image(boxes, img_shape)
keep = box_ops.remove_small_boxes(boxes, self.min_size)
boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]
# non-maximum suppression, independently done per level
keep = box_ops.batched_nms(boxes, scores, lvl, self.nms_thresh)
# keep only topk scoring predictions
keep = keep[:self.post_nms_top_n]
boxes, scores = boxes[keep], scores[keep]
final_boxes.append(boxes)
final_scores.append(scores)
return final_boxes, final_scores
def compute_loss(self, objectness, pred_bbox_deltas, labels, regression_targets):
"""
Arguments:
objectness (Tensor)
pred_bbox_deltas (Tensor)
labels (List[Tensor])
regression_targets (List[Tensor])
Returns:
objectness_loss (Tensor)
            box_loss (Tensor)
"""
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)
sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
objectness = objectness.flatten()
labels = torch.cat(labels, dim=0)
regression_targets = torch.cat(regression_targets, dim=0)
box_loss = F.l1_loss(
pred_bbox_deltas[sampled_pos_inds],
regression_targets[sampled_pos_inds],
reduction="sum",
) / (sampled_inds.numel())
objectness_loss = F.binary_cross_entropy_with_logits(
objectness[sampled_inds], labels[sampled_inds]
)
return objectness_loss, box_loss
def forward(self, images, features, targets=None):
"""
Arguments:
images (ImageList): images for which we want to compute the predictions
features (List[Tensor]): features computed from the images that are
used for computing the predictions. Each tensor in the list
correspond to different feature levels
targets (List[Dict[Tensor]): ground-truth boxes present in the image (optional).
If provided, each element in the dict should contain a field `boxes`,
with the locations of the ground-truth boxes.
Returns:
boxes (List[Tensor]): the predicted boxes from the RPN, one Tensor per
image.
losses (Dict[Tensor]): the losses for the model during training. During
testing, it is an empty dict.
"""
# RPN uses all feature maps that are available
features = list(features.values())
objectness, pred_bbox_deltas = self.head(features)
anchors = self.anchor_generator(images, features)
num_images = len(anchors)
num_anchors_per_level = [o[0].numel() for o in objectness]
objectness, pred_bbox_deltas = \
concat_box_prediction_layers(objectness, pred_bbox_deltas)
# apply pred_bbox_deltas to anchors to obtain the decoded proposals
        # note that we detach the deltas because Faster R-CNN does not backprop through
# the proposals
proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)
proposals = proposals.view(num_images, -1, 4)
boxes, scores = self.filter_proposals(proposals, objectness, images.image_sizes, num_anchors_per_level)
losses = {}
if self.training:
labels, matched_gt_boxes = self.assign_targets_to_anchors(anchors, targets)
regression_targets = self.box_coder.encode(matched_gt_boxes, anchors)
loss_objectness, loss_rpn_box_reg = self.compute_loss(
objectness, pred_bbox_deltas, labels, regression_targets)
losses = {
"loss_objectness": loss_objectness,
"loss_rpn_box_reg": loss_rpn_box_reg,
}
return boxes, losses
|
py | b4126454228631001af11182703e6c4058380500 | import unittest
from history import History
from history_query_builder import HistoryQueryBuilder
from data_generation import DataGeneration
from recovery_engine import RecoveryEngine
from app_config import AppConfig
class RecoveryTest (unittest.TestCase):
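    # History notation (read from the strings below): wN[x] / rN[x] are a write
    # and a read of item x by transaction N, and cN commits transaction N.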
def test_not_recoverable(self):
input_str = 'w1[x] w1[y] r2[u] w2[x] r2[y] w2[y] c2 w1[z] c1'
history = HistoryQueryBuilder(input_str).process()
recovery_engine = RecoveryEngine(history)
report = recovery_engine.get_report()
self.assertFalse(report.is_recoverable())
self.assertFalse(report.is_cascadeless())
self.assertFalse(report.is_strict())
def test_recoverable_not_aca(self):
input_str = 'w1[x] w1[y] r2[u] w2[x] r2[y] w2[y] w1[z] c1 c2'
history = HistoryQueryBuilder(input_str).process()
recovery_engine = RecoveryEngine(history)
report = recovery_engine.get_report()
self.assertTrue(report.is_recoverable())
self.assertFalse(report.is_cascadeless())
self.assertFalse(report.is_strict())
def test_aca_not_strict(self):
input_str = 'w1[x] w1[y] r2[u] w2[x] w1[z] c1 r2[y] w2[y] c2'
history = HistoryQueryBuilder(input_str).process()
recovery_engine = RecoveryEngine(history)
report = recovery_engine.get_report()
self.assertTrue(report.is_recoverable())
self.assertTrue(report.is_cascadeless())
self.assertFalse(report.is_strict())
def test_strict(self):
input_str = 'r1[x] w1[x] c1 r2[x] w2[x] c2'
history = HistoryQueryBuilder(input_str).process()
recovery_engine = RecoveryEngine(history)
report = recovery_engine.get_report()
self.assertTrue(report.is_recoverable())
self.assertTrue(report.is_cascadeless())
self.assertTrue(report.is_strict())
if __name__ == '__main__':
unittest.main()
|
py | b41266183678df3898dc527e2d5cf7f17173d0e8 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from azure.core.exceptions import HttpResponseError
from azure.search.documents.aio import SearchClient
from devtools_testutils.aio import recorded_by_proxy_async
from devtools_testutils import AzureRecordedTestCase
from search_service_preparer import SearchEnvVarPreparer, search_decorator
class TestClientTestAsync(AzureRecordedTestCase):
@SearchEnvVarPreparer()
@search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json")
@recorded_by_proxy_async
async def test_search_client(self, endpoint, api_key, index_name):
client = SearchClient(endpoint, index_name, api_key)
async with client:
await self._test_get_search_simple(client)
await self._test_get_search_simple_with_top(client)
await self._test_get_search_filter(client)
await self._test_get_search_filter_array(client)
await self._test_get_search_counts(client)
await self._test_get_search_coverage(client)
await self._test_get_search_facets_none(client)
await self._test_get_search_facets_result(client)
await self._test_autocomplete(client)
await self._test_suggest(client)
async def _test_get_search_simple(self, client):
results = []
async for x in await client.search(search_text="hotel"):
results.append(x)
assert len(results) == 7
results = []
async for x in await client.search(search_text="motel"):
results.append(x)
assert len(results) == 2
async def _test_get_search_simple_with_top(self, client):
results = []
async for x in await client.search(search_text="hotel", top=3):
results.append(x)
assert len(results) == 3
results = []
async for x in await client.search(search_text="motel", top=3):
results.append(x)
assert len(results) == 2
async def _test_get_search_filter(self, client):
results = []
select = ["hotelName", "category", "description"]
async for x in await client.search(
search_text="WiFi",
filter="category eq 'Budget'",
select=",".join(select),
order_by="hotelName desc"
):
results.append(x)
assert [x["hotelName"] for x in results] == sorted(
[x["hotelName"] for x in results], reverse=True
)
expected = {
"category",
"hotelName",
"description",
"@search.score",
"@search.highlights",
}
assert all(set(x) == expected for x in results)
assert all(x["category"] == "Budget" for x in results)
async def _test_get_search_filter_array(self, client):
results = []
select = ["hotelName", "category", "description"]
async for x in await client.search(
search_text="WiFi",
filter="category eq 'Budget'",
select=select,
order_by="hotelName desc"
):
results.append(x)
assert [x["hotelName"] for x in results] == sorted(
[x["hotelName"] for x in results], reverse=True
)
expected = {
"category",
"hotelName",
"description",
"@search.score",
"@search.highlights",
}
assert all(set(x) == expected for x in results)
assert all(x["category"] == "Budget" for x in results)
async def _test_get_search_counts(self, client):
results = await client.search(search_text="hotel")
assert await results.get_count() is None
results = await client.search(search_text="hotel", include_total_count=True)
assert await results.get_count() == 7
async def _test_get_search_coverage(self, client):
results = await client.search(search_text="hotel")
assert await results.get_coverage() is None
results = await client.search(search_text="hotel", minimum_coverage=50.0)
cov = await results.get_coverage()
assert isinstance(cov, float)
assert cov >= 50.0
async def _test_get_search_facets_none(self, client):
select = ("hotelName", "category", "description")
results = await client.search(
search_text="WiFi",
select=",".join(select)
)
assert await results.get_facets() is None
async def _test_get_search_facets_result(self, client):
select = ("hotelName", "category", "description")
results = await client.search(
search_text="WiFi",
facets=["category"],
select=",".join(select)
)
assert await results.get_facets() == {
"category": [
{"value": "Budget", "count": 4},
{"value": "Luxury", "count": 1},
]
}
async def _test_autocomplete(self, client):
results = await client.autocomplete(search_text="mot", suggester_name="sg")
assert results == [{"text": "motel", "query_plus_text": "motel"}]
async def _test_suggest(self, client):
results = await client.suggest(search_text="mot", suggester_name="sg")
assert results == [
{"hotelId": "2", "text": "Cheapest hotel in town. Infact, a motel."},
{"hotelId": "9", "text": "Secret Point Motel"},
]
@SearchEnvVarPreparer()
@search_decorator(schema="hotel_schema.json", index_batch="hotel_large.json")
@recorded_by_proxy_async
async def test_search_client_large(self, endpoint, api_key, index_name):
client = SearchClient(endpoint, index_name, api_key)
async with client:
await self._test_get_search_simple_large(client)
async def _test_get_search_simple_large(self, client):
results = []
        async for x in await client.search(search_text=''):
results.append(x)
assert len(results) == 60
|
py | b41267e3f20884c79f57227d49c0496f5ce851f7 | # Licensed under the Prefect Community License, available at
# https://www.prefect.io/legal/prefect-community-license
from typing import Any, List
from graphql import GraphQLResolveInfo
from prefect_server import api
from prefect_server.database import models
from prefect_server.utilities.graphql import mutation
from prefect_server.utilities import exceptions
@mutation.field("set_schedule_active")
async def resolve_set_schedule_active(
obj: Any, info: GraphQLResolveInfo, input: dict
) -> dict:
return {"success": await api.schedules.set_active(schedule_id=input["schedule_id"])}
@mutation.field("set_schedule_inactive")
async def resolve_set_schedule_inactive(
obj: Any, info: GraphQLResolveInfo, input: dict
) -> dict:
return {
"success": await api.schedules.set_inactive(schedule_id=input["schedule_id"])
}
@mutation.field("schedule_flow_runs")
async def resolve_schedule_flow_runs(
obj: Any, info: GraphQLResolveInfo, input: dict
) -> List[dict]:
run_ids = await api.schedules.schedule_flow_runs(
schedule_id=input["schedule_id"],
max_runs=input.get("max_runs"),
seconds_since_last_checked=60,
)
return [{"id": i} for i in run_ids]
|
py | b412698911c09b609490f46e8fadfd8ce8dc81e0 | import requests
from django.db.models import Q
from dataclasses import dataclass
from openmrs_viamo.models import Visit, MissedAppointment
from datetime import datetime, timedelta, date
from core.utils.constants import Constants
from core.utils.data_conversion import DataConversion
@dataclass
class FetchOpenMRSData:
def get_arv_dispensing(self, instance):
start_date = date.today() + timedelta(days=4)
end_date = start_date + timedelta(days=4)
params = {
'startDate': str(start_date),
'endDate': str(end_date)
}
try:
response = requests.get(instance, params=params, auth=Constants.openmrs_auth.value)
data_list = response.json()['rows']
return data_list
except requests.exceptions.RequestException as err:
print(err)
def get_misssed_appointment(self, instance):
end_date = date.today() - timedelta(days=2)
params = {
'endDate': str(end_date)
}
try:
response = requests.get(instance, params=params, auth=Constants.openmrs_auth.value)
data_list = response.json()['rows']
return data_list
except requests.exceptions.RequestException as err:
print(err)
@dataclass
class AddDataToMiddleware:
@staticmethod
def add_arv_dispensing(province, instance):
data_list = FetchOpenMRSData().get_arv_dispensing(instance)
if data_list is not None:
for data in data_list:
visit, created = Visit.objects.get_or_create(
province=province,
district=data['Distrito'],
health_facility=data['us'],
patient_id=data['patient_id'],
patient_name=data['NomeCompleto'],
patient_identifier=data['NID'],
age=data['age'],
phone_number=data['phone_number'],
appointment_date=datetime.fromtimestamp(data['dispensing_date'] / 1e3),
next_appointment_date=datetime.fromtimestamp(data['next_dispensing_date'] / 1e3),
gender=data['gender'],
community=data['Bairro'],
pregnant=data['pregnant'],
brestfeeding=data['brestfeeding'],
tb=data['tb']
)
visit.save()
@staticmethod
def add_missed_appointments(province, instance):
data_list = FetchOpenMRSData().get_misssed_appointment(instance)
if data_list is not None:
for data in data_list:
missed_appointment, created = MissedAppointment.objects.get_or_create(
province=province,
district=data['Distrito'],
health_facility=data['us'],
patient_id=data['patient_id'],
patient_name=data['nome'],
patient_identifier=data['NID'],
age=data['idade_actual'],
phone_number=data['Telefone'],
last_appointment_date=DataConversion.convert_int_date(data['ultimo_lev']), #datetime.fromtimestamp(data['ultimo_lev'] / 1e3),
gender=data['gender'],
community=data['Bairro'],
pregnant=data['p_gestante'],
drug_pickup_missed_days=data['dias_falta_lev'],
visit_missed_days=data['dias_falta_seg']
)
missed_appointment.save()
@dataclass
class PostData:
api_url = Constants.viamo_api_url.value
api_key = Constants.viamo_api_key.value
@classmethod
def post_sms_reminder(cls):
payload_list = []
visit = Visit.objects.exclude(phone_number=None).filter(synced=False)
for v in visit:
phone = v.phone_number.strip()
payload = {
"api_key": cls.api_key,
"phone": phone[:9],
"receive_voice": "1",
"receive_sms": "1",
"preferred_channel": "1",
"groups": "463089",
"active": "1",
}
data_values = {
"patient_identifier": v.patient_identifier,
"appointment_date": '{:%Y-%m-%d}'.format(v.appointment_date),
"gender": v.gender,
"pregnant": v.pregnant,
"age": v.age,
"district": v.district,
"province": v.province,
"health_facility": v.health_facility
}
payload['property'] = data_values
payload_list.append(payload)
v.synced = True
v.save()
records = 0
records_not_sent = []
try:
for data in payload_list:
response = requests.post(cls.api_url, json=data)
print(f'Sending {records} of {len(payload_list)} Records')
if response.status_code == 200:
records += 1
else:
records_not_sent.append(data.copy())
print(records_not_sent)
except requests.exceptions.RequestException as err:
print(err)
@classmethod
def post_missed_appointment(cls):
payload_list = []
missed_appointment = MissedAppointment.objects.exclude(phone_number=None).filter(synced=False, drug_pickup_missed_days__gt=0, visit_missed_days__gt=0)
for m in missed_appointment:
phone = m.phone_number.strip()
payload = {
"api_key": cls.api_key,
"phone": phone[:9],
"receive_voice": "1",
"receive_sms": "1",
"preferred_channel": "1",
"groups": "485273",
"active": "1",
}
data_values = {
"patient_identifier": m.patient_identifier,
"last_appointment_date": '{:%Y-%m-%d}'.format(m.last_appointment_date),
"gender": m.gender,
"pregnant": m.pregnant,
"age": m.age,
"district": m.district,
"province": m.province,
"health_facility": m.health_facility
}
payload['property'] = data_values
payload_list.append(payload)
m.synced = True
m.save()
records = 0
records_not_sent = []
try:
for data in payload_list:
                response = requests.post(cls.api_url, json=data)
                print(f'Sending {records} of {len(payload_list)} Records')
if response.status_code == 200:
records += 1
# missed = MissedAppointment.objects.get(patient_identifier=data['property']['patient_identifier'])
# missed.synced = True
# missed.save()
else:
records_not_sent.append(data.copy())
print(records_not_sent)
except requests.exceptions.RequestException as err:
print(err)
|
py | b4126a36713158c25e00672633f5f2c91179264d | import json
import logging
import uuid
from galaxy import (
exceptions,
model
)
from galaxy.managers import histories
from galaxy.tools.parameters.meta import expand_workflow_inputs
from galaxy.workflow.resources import get_resource_mapper_function
INPUT_STEP_TYPES = ['data_input', 'data_collection_input', 'parameter_input']
log = logging.getLogger(__name__)
class WorkflowRunConfig:
""" Wrapper around all the ways a workflow execution can be parameterized.
:param target_history: History to execute workflow in.
:type target_history: galaxy.model.History.
:param replacement_dict: Workflow level parameters used for renaming post
job actions.
:type replacement_dict: dict
:param copy_inputs_to_history: Should input data parameters be copied to
target_history. (Defaults to False)
:type copy_inputs_to_history: bool
:param inputs: Map from step ids to dict's containing HDA for these steps.
:type inputs: dict
:param inputs_by: How inputs maps to inputs (datasets/collections) to workflows
steps - by unencoded database id ('step_id'), index in workflow
'step_index' (independent of database), or by input name for
that step ('name').
:type inputs_by: str
:param param_map: Override step parameters - should be dict with step id keys and
tool param name-value dicts as values.
:type param_map: dict
"""
def __init__(self, target_history,
replacement_dict,
copy_inputs_to_history=False,
inputs=None,
param_map=None,
allow_tool_state_corrections=False,
use_cached_job=False,
resource_params=None):
self.target_history = target_history
self.replacement_dict = replacement_dict
self.copy_inputs_to_history = copy_inputs_to_history
self.inputs = inputs or {}
self.param_map = param_map or {}
self.resource_params = resource_params or {}
self.allow_tool_state_corrections = allow_tool_state_corrections
self.use_cached_job = use_cached_job
def _normalize_inputs(steps, inputs, inputs_by):
normalized_inputs = {}
for step in steps:
if step.type not in INPUT_STEP_TYPES:
continue
possible_input_keys = []
for inputs_by_el in inputs_by.split("|"):
if inputs_by_el == "step_id":
possible_input_keys.append(str(step.id))
elif inputs_by_el == "step_index":
possible_input_keys.append(str(step.order_index))
elif inputs_by_el == "step_uuid":
possible_input_keys.append(str(step.uuid))
elif inputs_by_el == "name":
possible_input_keys.append(step.label or step.tool_inputs.get('name'))
else:
raise exceptions.MessageException("Workflow cannot be run because unexpected inputs_by value specified.")
inputs_key = None
for possible_input_key in possible_input_keys:
if possible_input_key in inputs:
inputs_key = possible_input_key
default_value = step.tool_inputs.get("default")
optional = step.tool_inputs.get("optional") or False
# Need to be careful here to make sure 'default' has correct type - not sure how to do that
# but asserting 'optional' is definitely a bool and not a String->Bool or something is a good
# start to ensure tool state is being preserved and loaded in a type safe way.
assert isinstance(optional, bool)
if not inputs_key and default_value is None and not optional:
message = "Workflow cannot be run because an expected input step '{}' ({}) is not optional and no input.".format(step.id, step.label)
raise exceptions.MessageException(message)
if inputs_key:
normalized_inputs[step.id] = inputs[inputs_key]
return normalized_inputs
def _normalize_step_parameters(steps, param_map, legacy=False, already_normalized=False):
""" Take a complex param_map that can reference parameters by
step_id in the new flexible way or in the old one-parameter
per step fashion or by tool id and normalize the parameters so
everything is referenced by a numeric step id.
"""
normalized_param_map = {}
for step in steps:
if already_normalized:
param_dict = param_map.get(str(step.order_index), {})
else:
param_dict = _step_parameters(step, param_map, legacy=legacy)
if step.type == "subworkflow" and param_dict:
if not already_normalized:
raise exceptions.RequestParameterInvalidException("Specifying subworkflow step parameters requires already_normalized to be specified as true.")
subworkflow_param_dict = {}
for key, value in param_dict.items():
step_index, param_name = key.split("|", 1)
if step_index not in subworkflow_param_dict:
subworkflow_param_dict[step_index] = {}
subworkflow_param_dict[step_index][param_name] = value
param_dict = _normalize_step_parameters(step.subworkflow.steps, subworkflow_param_dict, legacy=legacy, already_normalized=already_normalized)
if param_dict:
normalized_param_map[step.id] = param_dict
return normalized_param_map
def _step_parameters(step, param_map, legacy=False):
"""
Update ``step`` parameters based on the user-provided ``param_map`` dict.
``param_map`` should be structured as follows::
PARAM_MAP = {STEP_ID_OR_UUID: PARAM_DICT, ...}
PARAM_DICT = {NAME: VALUE, ...}
For backwards compatibility, the following (deprecated) format is
also supported for ``param_map``::
PARAM_MAP = {TOOL_ID: PARAM_DICT, ...}
in which case PARAM_DICT affects all steps with the given tool id.
If both by-tool-id and by-step-id specifications are used, the
latter takes precedence.
Finally (again, for backwards compatibility), PARAM_DICT can also
be specified as::
PARAM_DICT = {'param': NAME, 'value': VALUE}
Note that this format allows only one parameter to be set per step.
"""
param_dict = param_map.get(step.tool_id, {}).copy()
if legacy:
param_dict.update(param_map.get(str(step.id), {}))
else:
param_dict.update(param_map.get(str(step.order_index), {}))
step_uuid = step.uuid
if step_uuid:
uuid_params = param_map.get(str(step_uuid), {})
param_dict.update(uuid_params)
if param_dict:
if 'param' in param_dict and 'value' in param_dict:
param_dict[param_dict['param']] = param_dict['value']
del param_dict['param']
del param_dict['value']
# Inputs can be nested dict, but Galaxy tool code wants nesting of keys (e.g.
# cond1|moo=4 instead of cond1: {moo: 4} ).
new_params = _flatten_step_params(param_dict)
return new_params
def _flatten_step_params(param_dict, prefix=""):
# TODO: Temporary work around until tool code can process nested data
# structures. This should really happen in there so the tools API gets
# this functionality for free and so that repeats can be handled
    # properly. Also the tool code walks the tool inputs so it knows what is
    # a complex value object versus something that maps to child parameters
    # better than the hack of searching for src and id here.
new_params = {}
for key in list(param_dict.keys()):
if prefix:
effective_key = "{}|{}".format(prefix, key)
else:
effective_key = key
value = param_dict[key]
if isinstance(value, dict) and (not ('src' in value and 'id' in value) and key != "__POST_JOB_ACTIONS__"):
new_params.update(_flatten_step_params(value, effective_key))
else:
new_params[effective_key] = value
return new_params
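# Worked example (from the note above): {"cond1": {"moo": 4}} flattens to
# {"cond1|moo": 4}, the key nesting that Galaxy tool code expects.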
def _get_target_history(trans, workflow, payload, param_keys=None, index=0):
param_keys = param_keys or []
history_name = payload.get('new_history_name', None)
history_id = payload.get('history_id', None)
history_param = payload.get('history', None)
if [history_name, history_id, history_param].count(None) < 2:
raise exceptions.RequestParameterInvalidException("Specified workflow target history multiple ways - at most one of 'history', 'history_id', and 'new_history_name' may be specified.")
if history_param:
if history_param.startswith('hist_id='):
history_id = history_param[8:]
else:
history_name = history_param
if history_id:
history_manager = histories.HistoryManager(trans.app)
target_history = history_manager.get_owned(trans.security.decode_id(history_id), trans.user, current_history=trans.history)
else:
if history_name:
nh_name = history_name
else:
nh_name = 'History from %s workflow' % workflow.name
if len(param_keys) <= index:
raise exceptions.MessageException("Incorrect expansion of workflow batch parameters.")
ids = param_keys[index]
nids = len(ids)
if nids == 1:
nh_name = '{} on {}'.format(nh_name, ids[0])
elif nids > 1:
nh_name = '{} on {} and {}'.format(nh_name, ', '.join(ids[0:-1]), ids[-1])
new_history = trans.app.model.History(user=trans.user, name=nh_name)
trans.sa_session.add(new_history)
target_history = new_history
return target_history
def build_workflow_run_configs(trans, workflow, payload):
app = trans.app
allow_tool_state_corrections = payload.get('allow_tool_state_corrections', False)
use_cached_job = payload.get('use_cached_job', False)
# Sanity checks.
if len(workflow.steps) == 0:
raise exceptions.MessageException("Workflow cannot be run because it does not have any steps")
if workflow.has_cycles:
raise exceptions.MessageException("Workflow cannot be run because it contains cycles")
if 'step_parameters' in payload and 'parameters' in payload:
raise exceptions.RequestParameterInvalidException("Cannot specify both legacy parameters and step_parameters attributes.")
if 'inputs' in payload and 'ds_map' in payload:
raise exceptions.RequestParameterInvalidException("Cannot specify both legacy ds_map and input attributes.")
add_to_history = 'no_add_to_history' not in payload
legacy = payload.get('legacy', False)
already_normalized = payload.get('parameters_normalized', False)
raw_parameters = payload.get('parameters', {})
run_configs = []
unexpanded_param_map = _normalize_step_parameters(workflow.steps, raw_parameters, legacy=legacy, already_normalized=already_normalized)
unexpanded_inputs = payload.get('inputs', None)
inputs_by = payload.get('inputs_by', None)
# New default is to reference steps by index of workflow step
# which is intrinsic to the workflow and independent of the state
# of Galaxy at the time of workflow import.
default_inputs_by = 'step_index|step_uuid'
inputs_by = inputs_by or default_inputs_by
if unexpanded_inputs is None:
# Default to legacy behavior - read ds_map and reference steps
# by unencoded step id (a raw database id).
unexpanded_inputs = payload.get('ds_map', {})
if legacy:
default_inputs_by = 'step_id|step_uuid'
inputs_by = inputs_by or default_inputs_by
else:
unexpanded_inputs = unexpanded_inputs or {}
expanded_params, expanded_param_keys, expanded_inputs = expand_workflow_inputs(unexpanded_param_map, unexpanded_inputs)
for index, (param_map, inputs) in enumerate(zip(expanded_params, expanded_inputs)):
history = _get_target_history(trans, workflow, payload, expanded_param_keys, index)
if inputs or not already_normalized:
normalized_inputs = _normalize_inputs(workflow.steps, inputs, inputs_by)
else:
# Only allow dumping IDs directly into JSON database instead of properly recording the
# inputs with referential integrity if parameters are already normalized (coming from tool form).
normalized_inputs = {}
if param_map:
# disentangle raw parameter dictionaries into formal request structures if we can
# to setup proper WorkflowRequestToInputDatasetAssociation, WorkflowRequestToInputDatasetCollectionAssociation
# and WorkflowRequestInputStepParameter objects.
for step in workflow.steps:
normalized_key = step.id
if step.type == "parameter_input":
if normalized_key in param_map:
value = param_map.pop(normalized_key)
normalized_inputs[normalized_key] = value["input"]
steps_by_id = workflow.steps_by_id
# Set workflow inputs.
for key, input_dict in normalized_inputs.items():
step = steps_by_id[key]
if step.type == 'parameter_input':
continue
if 'src' not in input_dict:
raise exceptions.RequestParameterInvalidException("Not input source type defined for input '%s'." % input_dict)
if 'id' not in input_dict:
raise exceptions.RequestParameterInvalidException("Not input id defined for input '%s'." % input_dict)
if 'content' in input_dict:
raise exceptions.RequestParameterInvalidException("Input cannot specify explicit 'content' attribute %s'." % input_dict)
input_source = input_dict['src']
input_id = input_dict['id']
try:
if input_source == 'ldda':
ldda = trans.sa_session.query(app.model.LibraryDatasetDatasetAssociation).get(trans.security.decode_id(input_id))
assert trans.user_is_admin or trans.app.security_agent.can_access_dataset(trans.get_current_user_roles(), ldda.dataset)
content = ldda.to_history_dataset_association(history, add_to_history=add_to_history)
elif input_source == 'ld':
ldda = trans.sa_session.query(app.model.LibraryDataset).get(trans.security.decode_id(input_id)).library_dataset_dataset_association
assert trans.user_is_admin or trans.app.security_agent.can_access_dataset(trans.get_current_user_roles(), ldda.dataset)
content = ldda.to_history_dataset_association(history, add_to_history=add_to_history)
elif input_source == 'hda':
# Get dataset handle, add to dict and history if necessary
content = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(trans.security.decode_id(input_id))
assert trans.user_is_admin or trans.app.security_agent.can_access_dataset(trans.get_current_user_roles(), content.dataset)
elif input_source == 'uuid':
dataset = trans.sa_session.query(app.model.Dataset).filter(app.model.Dataset.uuid == input_id).first()
if dataset is None:
                        # this will need to be changed later. If federation code is available,
                        # then a missing UUID could be found among federation partners
                        raise exceptions.RequestParameterInvalidException("Cannot find input dataset with UUID: %s." % input_id)
assert trans.user_is_admin or trans.app.security_agent.can_access_dataset(trans.get_current_user_roles(), dataset)
content = history.add_dataset(dataset)
elif input_source == 'hdca':
content = app.dataset_collections_service.get_dataset_collection_instance(trans, 'history', input_id)
else:
raise exceptions.RequestParameterInvalidException("Unknown workflow input source '%s' specified." % input_source)
if add_to_history and content.history != history:
content = content.copy()
if isinstance(content, app.model.HistoryDatasetAssociation):
history.add_dataset(content)
else:
history.add_dataset_collection(content)
input_dict['content'] = content
except AssertionError:
raise exceptions.ItemAccessibilityException("Invalid workflow input '%s' specified" % input_id)
for key in set(normalized_inputs.keys()):
value = normalized_inputs[key]
if isinstance(value, dict) and 'content' in value:
normalized_inputs[key] = value['content']
else:
normalized_inputs[key] = value
resource_params = payload.get('resource_params', {})
if resource_params:
# quick attempt to validate parameters, just handle select options now since is what
# is needed for DTD - arbitrary plugins can define arbitrary logic at runtime in the
# destination function. In the future this should be extended to allow arbitrary
# pluggable validation.
resource_mapper_function = get_resource_mapper_function(trans.app)
# TODO: Do we need to do anything with the stored_workflow or can this be removed.
resource_parameters = resource_mapper_function(trans=trans, stored_workflow=None, workflow=workflow)
for resource_parameter in resource_parameters:
if resource_parameter.get("type") == "select":
name = resource_parameter.get("name")
if name in resource_params:
value = resource_params[name]
valid_option = False
# TODO: How should be handle the case where no selection is made by the user
# This can happen when there is a select on the page but the user has no options to select
# Here I have the validation pass it through. An alternative may be to remove the parameter if
# it is None.
if value is None:
valid_option = True
else:
for option_elem in resource_parameter.get('data'):
option_value = option_elem.get("value")
if value == option_value:
valid_option = True
if not valid_option:
raise exceptions.RequestParameterInvalidException("Invalid value for parameter '%s' found." % name)
run_configs.append(WorkflowRunConfig(
target_history=history,
replacement_dict=payload.get('replacement_params', {}),
inputs=normalized_inputs,
param_map=param_map,
allow_tool_state_corrections=allow_tool_state_corrections,
use_cached_job=use_cached_job,
resource_params=resource_params,
))
return run_configs
def workflow_run_config_to_request(trans, run_config, workflow):
param_types = model.WorkflowRequestInputParameter.types
workflow_invocation = model.WorkflowInvocation()
workflow_invocation.uuid = uuid.uuid1()
workflow_invocation.history = run_config.target_history
def add_parameter(name, value, type):
parameter = model.WorkflowRequestInputParameter(
name=name,
value=value,
type=type,
)
workflow_invocation.input_parameters.append(parameter)
steps_by_id = {}
for step in workflow.steps:
steps_by_id[step.id] = step
serializable_runtime_state = step.module.encode_runtime_state(step.state)
step_state = model.WorkflowRequestStepState()
step_state.workflow_step = step
log.info("Creating a step_state for step.id %s" % step.id)
step_state.value = serializable_runtime_state
workflow_invocation.step_states.append(step_state)
if step.type == "subworkflow":
subworkflow_run_config = WorkflowRunConfig(
target_history=run_config.target_history,
replacement_dict=run_config.replacement_dict,
copy_inputs_to_history=False,
use_cached_job=run_config.use_cached_job,
inputs={},
param_map=run_config.param_map.get(step.order_index, {}),
allow_tool_state_corrections=run_config.allow_tool_state_corrections,
resource_params=run_config.resource_params
)
subworkflow_invocation = workflow_run_config_to_request(
trans,
subworkflow_run_config,
step.subworkflow,
)
workflow_invocation.attach_subworkflow_invocation_for_step(
step,
subworkflow_invocation,
)
replacement_dict = run_config.replacement_dict
for name, value in replacement_dict.items():
add_parameter(
name=name,
value=value,
type=param_types.REPLACEMENT_PARAMETERS,
)
for step_id, content in run_config.inputs.items():
workflow_invocation.add_input(content, step_id)
for step_id, param_dict in run_config.param_map.items():
add_parameter(
name=step_id,
value=json.dumps(param_dict),
type=param_types.STEP_PARAMETERS,
)
resource_parameters = run_config.resource_params
for key, value in resource_parameters.items():
add_parameter(key, value, param_types.RESOURCE_PARAMETERS)
add_parameter("copy_inputs_to_history", "true" if run_config.copy_inputs_to_history else "false", param_types.META_PARAMETERS)
add_parameter("use_cached_job", "true" if run_config.use_cached_job else "false", param_types.META_PARAMETERS)
return workflow_invocation
def workflow_request_to_run_config(work_request_context, workflow_invocation):
param_types = model.WorkflowRequestInputParameter.types
history = workflow_invocation.history
replacement_dict = {}
inputs = {}
param_map = {}
resource_params = {}
copy_inputs_to_history = None
use_cached_job = False
for parameter in workflow_invocation.input_parameters:
parameter_type = parameter.type
if parameter_type == param_types.REPLACEMENT_PARAMETERS:
replacement_dict[parameter.name] = parameter.value
elif parameter_type == param_types.META_PARAMETERS:
if parameter.name == "copy_inputs_to_history":
copy_inputs_to_history = (parameter.value == "true")
if parameter.name == 'use_cached_job':
use_cached_job = (parameter.value == 'true')
elif parameter_type == param_types.RESOURCE_PARAMETERS:
resource_params[parameter.name] = parameter.value
elif parameter_type == param_types.STEP_PARAMETERS:
param_map[int(parameter.name)] = json.loads(parameter.value)
for input_association in workflow_invocation.input_datasets:
inputs[input_association.workflow_step_id] = input_association.dataset
for input_association in workflow_invocation.input_dataset_collections:
inputs[input_association.workflow_step_id] = input_association.dataset_collection
for input_association in workflow_invocation.input_step_parameters:
inputs[input_association.workflow_step_id] = input_association.parameter_value
if copy_inputs_to_history is None:
raise exceptions.InconsistentDatabase("Failed to find copy_inputs_to_history parameter loading workflow_invocation from database.")
workflow_run_config = WorkflowRunConfig(
target_history=history,
replacement_dict=replacement_dict,
inputs=inputs,
param_map=param_map,
copy_inputs_to_history=copy_inputs_to_history,
use_cached_job=use_cached_job,
resource_params=resource_params,
)
return workflow_run_config
def __decode_id(trans, workflow_id, model_type="workflow"):
try:
return trans.security.decode_id(workflow_id)
except Exception:
message = "Malformed {} id ( {} ) specified, unable to decode".format(model_type, workflow_id)
raise exceptions.MalformedId(message)
|
py | b4126d730491949f598e058b26a217ba4de4d0e7 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/subinterfaces/subinterface/vlan/match/single-tagged-list/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State for matching single-tagged packets with a list of VLAN
identifiers.
"""
__slots__ = ('_path_helper', '_extmethods', '__vlan_ids',)
_yang_name = 'state'
_yang_namespace = 'http://openconfig.net/yang/interfaces'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__vlan_ids = YANGDynClass(unique=True, base=TypedListType(allowed_type=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']})), is_leaf=False, yang_name="vlan-ids", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['interfaces', 'interface', 'subinterfaces', 'subinterface', 'vlan', 'match', 'single-tagged-list', 'state']
def _get_vlan_ids(self):
"""
Getter method for vlan_ids, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/vlan/match/single_tagged_list/state/vlan_ids (oc-vlan-types:vlan-id)
YANG Description: VLAN identifiers for single-tagged packets.
"""
return self.__vlan_ids
def _set_vlan_ids(self, v, load=False):
"""
Setter method for vlan_ids, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/vlan/match/single_tagged_list/state/vlan_ids (oc-vlan-types:vlan-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_ids is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_ids() directly.
YANG Description: VLAN identifiers for single-tagged packets.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,unique=True, base=TypedListType(allowed_type=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']})), is_leaf=False, yang_name="vlan-ids", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlan_ids must be of a type compatible with oc-vlan-types:vlan-id""",
'defined-type': "oc-vlan-types:vlan-id",
'generated-type': """YANGDynClass(unique=True, base=TypedListType(allowed_type=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']})), is_leaf=False, yang_name="vlan-ids", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)""",
})
self.__vlan_ids = t
if hasattr(self, '_set'):
self._set()
def _unset_vlan_ids(self):
self.__vlan_ids = YANGDynClass(unique=True, base=TypedListType(allowed_type=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']})), is_leaf=False, yang_name="vlan-ids", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
vlan_ids = __builtin__.property(_get_vlan_ids)
_pyangbind_elements = OrderedDict([('vlan_ids', vlan_ids), ])
|
py | b4126d7bb463e1a39ef5934aec48d8da1b857dc7 | import socket, errno, select
from . import tube
from .. import log
class sock(tube.tube):
"""Methods available exclusively to sockets."""
def __init__(self, timeout):
super(sock, self).__init__(timeout)
self.closed = {"recv": False, "send": False}
# Overwritten for better usability
def recvall(self):
"""recvall() -> str
Receives data until the socket is closed.
"""
if hasattr(self, 'type') and self.type == socket.SOCK_DGRAM:
log.error("UDP sockets does not supports recvall")
else:
return super(sock, self).recvall()
def recv_raw(self, numb):
if self.closed["recv"]:
raise EOFError
go = True
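# Retry the recv() when it is interrupted by a signal (EINTR below)
# instead of treating the interruption as an error.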
while go:
go = False
try:
data = self.sock.recv(numb)
except socket.timeout:
return None
except IOError as e:
if e.errno == errno.EAGAIN:
return None
elif e.errno in [errno.ECONNREFUSED, errno.ECONNRESET]:
self.shutdown("recv")
raise EOFError
elif e.errno == errno.EINTR:
go = True
else:
raise
if data == '':
self.shutdown("recv")
raise EOFError
else:
return data
def send_raw(self, data):
if self.closed["send"]:
raise EOFError
try:
self.sock.sendall(data)
except IOError as e:
eof_numbers = [errno.EPIPE, errno.ECONNRESET, errno.ECONNREFUSED]
if e.message == 'Socket is closed' or e.errno in eof_numbers:
self.shutdown("send")
raise EOFError
else:
raise
def settimeout_raw(self, timeout):
if not self.sock:
return
if timeout is not None and timeout <= 0:
self.sock.setblocking(0)
else:
self.sock.setblocking(1)
self.sock.settimeout(timeout)
def can_recv_raw(self, timeout):
if not self.sock or self.closed["recv"]:
return False
return select.select([self.sock], [], [], timeout) == ([self.sock], [], [])
def connected_raw(self, direction):
if not self.sock:
return False
if direction == 'any':
return True
elif direction == 'recv':
return not self.closed['recv']
elif direction == 'send':
return not self.closed['send']
def close(self):
if not self.sock:
return
# Call shutdown without triggering another call to close
self.closed['hack'] = False
self.shutdown('recv')
self.shutdown('send')
del self.closed['hack']
self.sock.close()
self.sock = None
self._close_msg()
def _close_msg(self):
log.info('Closed connection to %s port %d' % (self.rhost, self.rport))
def fileno(self):
if not self.sock:
log.error("A closed socket does not have a file number")
return self.sock.fileno()
def shutdown_raw(self, direction):
if self.closed[direction]:
return
self.closed[direction] = True
if direction == "send":
try:
self.sock.shutdown(socket.SHUT_WR)
except IOError as e:
if e.errno == errno.ENOTCONN:
pass
else:
raise
if direction == "recv":
try:
self.sock.shutdown(socket.SHUT_RD)
except IOError as e:
if e.errno == errno.ENOTCONN:
pass
else:
raise
if False not in self.closed.values():
self.close()
|
py | b4126ddfb454a56d606431f4379ac696984f64ee | #! /usr/bin/env python
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
import time
import sys
import os
import io
import json
from optparse import OptionParser
from docutils.core import publish_cmdline
from docutils.writers import s5_html as w
import re
try:
from ConfigParser import SafeConfigParser, NoOptionError, NoSectionError
except ImportError:
# try to be python3 compatible
from configparser import SafeConfigParser, NoOptionError, NoSectionError
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import requests
bearer_file = os.path.expanduser('~/.medium_bearer')
config_file = os.path.expanduser('~/.medium')
config_example = """
# Config file example
[medium]
client_id=supplied_when_registering_app
client_secret=supplied_when_registering_app
redirect_url=http://192.0.2.1/must_match_registered_url
state=canBeAnything
"""
scopes = ["basicProfile", "publishPost", "listPublications"]
usage = "Allows post of html, markdown, or rst files to medium.\n\nusage: %prog [options] [file]"
op = OptionParser(usage, add_help_option=True)
op.add_option("-c", "--code", dest="code", default=None, help="code from redirect url after "
"approval")
op.add_option("-u", "--user", action="store_true", default=False, dest='user',
help='print user info')
op.add_option("-l", "--list-publications", action="store_true", default=False, dest='list',
help='list-publications')
op.add_option("-t", "--title", default=None, dest='title', help='article title')
op.add_option("-a", "--authors", default=None, dest='publication',
help='show contributor info for pub')
op.add_option("-p", "--pub", default=None, dest='pub', help='posts to publication')
op.add_option("-r", "--ref-url", default=None, dest='url', help="canonicalUrl. Ref if originally "
"posted elsewhere")
op.add_option("-k", "--keep-tmpfiles", action="store_false", dest="remove_tmpfiles", default=True,
help="Keep /tmp/article.rst and /tmp/article.html tmp files when processing rst")
(o, args) = op.parse_args()
if len(args) == 1:
s_file = args[0]
if os.path.isfile(s_file) is False:
raise UserWarning("Cannot find file for posting %s" % s_file)
else:
s_file = None
if (int(s_file is not None) + int(o.list) + int(o.user) + int(o.publication is not None)) != 1:
print "You must specify one of --list --user or file to post"
op.print_help()
sys.exit(0)
c = SafeConfigParser()
if len(c.read(config_file)) == 0:
print "Error Config file not found: %s\n%s" % (config_file, config_example)
os.exit(1)
try:
client_id = c.get('medium', 'client_id')
client_secret = c.get('medium', 'client_secret')
redirect_url = c.get('medium', 'redirect_url')
state = c.get('medium', 'state')
except (NoOptionError, NoSectionError) as e:
print("Configuration Error %s\n%s" % (e, config_example))
sys.exit(1)
class MediumClient(object):
"""A client for the Medium OAuth2 REST API."""
def __init__(self, application_id=None, application_secret=None,
access_token=None):
self.application_id = application_id
self.application_secret = application_secret
self.access_token = access_token
self._user = None
self._BASE_PATH = "https://api.medium.com"
@property
def user_id(self):
"""Current User ID.
:return: User id from .get_current_user()['id']
:rtype: string
"""
if self._user is None:
self._user = self.get_current_user()
return self._user['id']
def get_authorization_url(self, state, redirect_url, scopes):
"""Get a URL for users to authorize the application.
:param str state: A string that will be passed back to the redirect_url
:param str redirect_url: The URL to redirect after authorization
:param list scopes: The scopes to grant the application
:returns: str
"""
qs = {
"client_id": self.application_id,
"scope": ",".join(scopes),
"state": state,
"response_type": "code",
"redirect_uri": redirect_url,
}
return "https://medium.com/m/oauth/authorize?" + urlencode(qs)
def exchange_authorization_code(self, code, redirect_url):
"""Exchange the authorization code for a long-lived access token, and
set the token on the current Client.
:param str code: The code supplied to the redirect URL after a user
authorizes the application
:param str redirect_url: The same redirect URL used for authorizing
the application
:returns: A dictionary with the new authorizations ::
{
'token_type': 'Bearer',
'access_token': '...',
'expires_at': 1449441560773,
'refresh_token': '...',
'scope': ['basicProfile', 'publishPost']
}
"""
data = {
"code": code,
"client_id": self.application_id,
"client_secret": self.application_secret,
"grant_type": "authorization_code",
"redirect_uri": redirect_url,
}
return self._request_and_set_auth_code(data)
def exchange_refresh_token(self, refresh_token):
"""Exchange the supplied refresh token for a new access token, and
set the token on the current Client.
:param str refresh_token: The refresh token, as provided by
``exchange_authorization_code()``
:returns: A dictionary with the new authorizations ::
{
'token_type': 'Bearer',
'access_token': '...',
'expires_at': 1449441560773,
'refresh_token': '...',
'scope': ['basicProfile', 'publishPost']
}
"""
data = {
"refresh_token": refresh_token,
"client_id": self.application_id,
"client_secret": self.application_secret,
"grant_type": "refresh_token",
}
return self._request_and_set_auth_code(data)
def get_current_user(self):
"""Fetch the data for the currently authenticated user.
Requires the ``basicProfile`` scope.
:returns: A dictionary with the users data ::
{
'username': 'kylehg',
'url': 'https://medium.com/@kylehg',
'imageUrl': 'https://cdn-images-1.medium.com/...',
'id': '1f86...',
'name': 'Kyle Hardgrave'
}
"""
if self._user is None:
self._user = self._request("GET", "/v1/me")
return self._user
def create_post(self, title, content, content_format, publication_id=None, tags=None,
canonical_url=None, publish_status=None, license=None):
"""Create a post for the current user.
Requires the ``publishPost`` scope.
:param str title: The title of the post
:param str content: The content of the post, in HTML or Markdown
:param str content_format: The format of the post content, either
``html`` or ``markdown``
:param: str publication_id: Publication ID when publishing to publication.
:param list tags: (optional), List of tags for the post, max 3
:param str canonical_url: (optional), A rel="canonical" link for
the post
:param str publish_status: (optional), What to publish the post as,
either ``public``, ``unlisted``, or ``draft``. Defaults to
``public``.
:param license: (optional), The license to publish the post under:
- ``all-rights-reserved`` (default)
- ``cc-40-by``
- ``cc-40-by-sa``
- ``cc-40-by-nd``
- ``cc-40-by-nc``
- ``cc-40-by-nc-nd``
- ``cc-40-by-nc-sa``
- ``cc-40-zero``
- ``public-domain``
:returns: A dictionary with the post data ::
{
'canonicalUrl': '',
'license': 'all-rights-reserved',
'title': 'My Title',
'url': 'https://medium.com/@kylehg/55050649c95',
'tags': ['python', 'is', 'great'],
'authorId': '1f86...',
'publishStatus': 'draft',
'id': '55050649c95'
}
"""
data = {
"title": title,
"content": content,
"contentFormat": content_format,
}
if tags is not None:
data["tags"] = tags
if canonical_url is not None:
data["canonicalUrl"] = canonical_url
if publish_status is not None:
data["publishStatus"] = publish_status
if license is not None:
data["license"] = license
if publication_id is None:
path = "/v1/users/%s/posts" % self.user_id
else:
path = "/v1/publications/%s/posts" % publication_id
return self._request("POST", path, json=data)
def upload_image(self, file_path, content_type):
"""Upload a local image to Medium for use in a post.
Requires the ``uploadImage`` scope.
:param str file_path: The file path of the image
:param str content_type: The type of the image. Valid values are
``image/jpeg``, ``image/png``, ``image/gif``, and ``image/tiff``.
:returns: A dictionary with the image data ::
{
'url': 'https://cdn-images-1.medium.com/0*dlkfjalksdjfl.jpg',
'md5': 'd87e1628ca597d386e8b3e25de3a18b8'
}
"""
with open(file_path, "rb") as f:
filename = os.path.basename(file_path)
files = {"image": (filename, f, content_type)}
return self._request("POST", "/v1/images", files=files)
def _request_and_set_auth_code(self, data):
"""Request an access token and set it on the current client."""
result = self._request("POST", "/v1/tokens", form_data=data)
self.access_token = result["access_token"]
return result
def _request(self, method, path, json=None, form_data=None, files=None):
"""Make a signed request to the given route."""
url = self._BASE_PATH + path
headers = {
"Accept": "application/json",
"Accept-Charset": "utf-8",
"Authorization": "Bearer %s" % self.access_token,
}
resp = requests.request(method, url, json=json, data=form_data,
files=files, headers=headers)
json = resp.json()
if 200 <= resp.status_code < 300:
try:
return json["data"]
except KeyError:
return json
raise MediumError("API request failed", json)
def get_contributors(self, publication_id):
"""Fetch a list of contributors to a publication.
Requires ``listPublications`` scope.
:param publication_id: The appllication-specific publication id as returned by
``get_publications()``
:return: publications
:rtype: `dict`
"""
return self._request("GET", "/v1/publications/%s/contributors" % publication_id)
def get_publications(self):
"""Fetch a list of publications associated with the user.
Requires ``listPublications`` scope.
:return: users data
:rtype: `dict`
"""
return self._request("GET", "/v1/users/%s/publications" % self.user_id)
class MediumError(Exception):
"""Wrapper for exceptions generated by the Medium API."""
def __init__(self, message, resp=None):
resp = resp or {}
self.resp = resp
try:
error = resp["errors"][0]
except KeyError:
error = {}
self.code = error.get("code", -1)
self.msg = error.get("message", message)
super(MediumError, self).__init__(self.msg)
client = MediumClient(application_id=client_id, application_secret=client_secret)
if os.path.isfile(bearer_file):
with io.open(bearer_file, encoding='utf-8', mode='r') as bf:
try:
bearer = json.load(bf)
bearer = client.exchange_refresh_token(bearer['refresh_token'])
except MediumError as e:
print("Token failure. You must refresh your token.\n%s" % (e))
os.unlink(bearer_file)
except Exception as e:
print("Token decode failure. You must refresh your token.\n%s" % (e))
os.unlink(bearer_file)
if os.path.isfile(bearer_file) is False:
if o.code is None and os.path.isfile(bearer_file) is False:
auth_url = client.get_authorization_url(state, redirect_url, scopes)
print "Authorized the app by following the url, and passing the code= value in " \
"the redirect url to --code to generate a new bearer token\n\n%s" % auth_url
sys.exit(0)
else:
bearer = client.exchange_authorization_code(o.code, redirect_url)
with open(bearer_file, mode='w') as bf:
json.dump(bearer, bf, indent=3)
if o.user:
resp = client.get_current_user()
elif o.list:
resp = client.get_publications()
elif o.publication is not None:
resp = client.get_contributors(o.publication)
elif s_file is not None:
in_format = "markdown" if s_file.lower()[-3:] == '.md' else "html"
title = "%s %s" % (os.path.basename(os.path.splitext(s_file)[0]).replace("_", " "),
time.strftime("%Y%m%d-%X%Z")) if o.title is None else o.title
if s_file[-4:].lower() == ".rst":
html_file = "/tmp/%s.html" % os.path.basename(s_file)[:-4]
tmp_rst = "/tmp/%s" % os.path.basename(s_file)
re_number = re.compile(r'^(\s*)(?:#|\d+)\.\s(\S.*)$')
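# Matches rst enumerated-list items ("1. foo" or "#. foo"), capturing the
# indentation and the item text so the numbering marker can be stripped below.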
with io.open(s_file, mode="r", encoding="utf-8") as content_in:
with io.open(tmp_rst, mode='w', encoding='utf-8') as content_out:
content_out.write(u'.. role:: index(raw)\n :format: html\n\n')
for line in content_in:
if u'.. todo:: ' in line:
line = line.replace(u'.. todo:: ', u'.. note:: TODO: ')
i = line.find(u'.. code-block::')
if i >= 0:
line = line[:i] + u'.. code-block::\n'
m = re_number.match(line)
if m is not None:
line = u'%s %s\n' % m.groups()
content_out.write(line)
publish_cmdline(writer=w.Writer(), argv=[tmp_rst, html_file])
s_file = html_file
with io.open(s_file, mode="r", encoding="utf-8") as content:
if o.pub is None:
print "posting to user"
resp = client.create_post(title=title, content=content.read(),
content_format=in_format, publish_status="draft",
canonical_url=o.url)
else:
print "posting to publication"
resp = client.create_post(publication_id=o.pub, title=title, content=content.read(),
content_format=in_format, publish_status="draft",
canonical_url=o.url)
if o.remove_tmpfiles and "tmp_rst" in vars():
os.unlink(tmp_rst)
os.unlink(html_file)
print(json.dumps(resp, indent=1))
|
py | b4126e55407f933688c14dea6ca4b4179f2516f3 | import unittest
try:
from urllib.parse import urlsplit
except ImportError:
from urlparse import urlsplit # Python 2
from mock import Mock, patch
from oauthlib.common import generate_client_id
from oauthlib.common import generate_token
from oauthlib.oauth1 import SignatureOnlyEndpoint
from oauthlib.oauth1 import RequestValidator
from lti import LaunchParams, OutcomeRequest, ToolProvider
from lti.utils import parse_qs, InvalidLTIConfigError
from lti.tool_provider import ProxyValidator
def create_tp(key=None, secret=None, lp=None, launch_url=None,
launch_headers=None, tp_class=ToolProvider):
key = key or generate_client_id()
secret = secret or generate_token()
launch_params = LaunchParams()
if lp is not None:
launch_params.update(lp)
launch_url = launch_url or "http://example.edu"
launch_headers = launch_headers or {}
return tp_class(key, secret, launch_params, launch_url, launch_headers)
class TestToolProvider(unittest.TestCase):
def test_constructor(self):
tp = create_tp()
self.assertIsInstance(tp.launch_params, LaunchParams)
tp = create_tp(launch_headers={'foo': 'bar'})
self.assertEqual(tp.launch_headers['foo'], 'bar')
def test_is_valid_request(self):
"""
just checks that the TP sends the correct args to the endpoint
"""
key = generate_client_id()
secret = generate_token()
lp = {
'lti_version': 'foo',
'lti_message_type': 'bar',
'resource_link_id': 123
}
launch_url = 'http://example.edu/foo/bar'
launch_headers = {'Content-Type': 'baz'}
tp = create_tp(key, secret, lp, launch_url, launch_headers)
with patch.object(SignatureOnlyEndpoint, 'validate_request') as mv:
mv.return_value = True, None # Tuple of valid, request
self.assertTrue(tp.is_valid_request(SignatureOnlyEndpoint, Mock()))
call_url, call_method, call_params, call_headers = mv.call_args[0]
self.assertEqual(call_url, launch_url)
self.assertEqual(call_method, 'POST')
self.assertEqual(call_params, lp)
self.assertEqual(call_headers, launch_headers)
def test_is_valid_request_no_key_or_secret(self):
"""
Checks that the key and secret will be populated during validation.
"""
key = 'spamspamspam'
secret_ = 'eggseggsegss'
lp = LaunchParams({
'lti_version': 'foo',
'lti_message_type': 'bar',
'resource_link_id': '123',
'oauth_consumer_key': key,
'oauth_nonce': '9069031379649850801466828504',
'oauth_timestamp': '1466828504',
'oauth_version': '1.0',
'oauth_signature_method': 'HMAC-SHA1',
'oauth_signature': 'WZ9IHyFnKgDKBvnAfNSL3aOVteg=',
})
launch_url = 'https://example.edu/foo/bar'
launch_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
class TpValidator(RequestValidator):
dummy_client = ''
def validate_timestamp_and_nonce(self, timestamp, nonce, request,
request_token=None,
access_token=None):
return True
def validate_client_key(self, client_key, request):
return True
def get_client_secret(self, client_key, request):
return secret_
secret = secret_ # Fool the ProxyValidator
tp = ToolProvider(params=lp, launch_url=launch_url,
launch_headers=launch_headers)
SOE = SignatureOnlyEndpoint
with patch.object(SOE, '_check_mandatory_parameters'):
with patch.object(SOE, '_check_signature', return_value=True):
self.assertTrue(tp.is_valid_request(
SOE, TpValidator()))
self.assertEqual(tp.consumer_key, key)
self.assertEqual(tp.consumer_secret, secret_)
def test_proxy_validator(self):
'''
Should store the secret when get_client_secret is called.
'''
class TpValidator(RequestValidator):
def get_client_secret(self, client_key, request):
return 'eggseggseggs'
pv = ProxyValidator(TpValidator())
self.assertFalse(hasattr(pv, 'secret'))
self.assertEqual(
pv.get_client_secret('spamspamspam', None), 'eggseggseggs')
self.assertEqual(pv.secret, 'eggseggseggs')
def test_outcome_service(self):
'''
Should recognize an outcome service.
'''
tp = create_tp()
self.assertFalse(tp.is_outcome_service())
tp = create_tp(lp={'lis_result_sourcedid': 1})
self.assertFalse(tp.is_outcome_service())
tp = create_tp(lp={
'lis_outcome_service_url': 'foo',
'lis_result_sourcedid': 1
})
self.assertTrue(tp.is_outcome_service())
def test_return_url_with_messages(self):
'''
Should generate a return url with messages.
'''
tp = create_tp()
self.assertIsNone(tp.build_return_url())
tp = create_tp(lp={
'launch_presentation_return_url': 'http://foo.edu/done'
})
self.assertEqual(tp.build_return_url(), 'http://foo.edu/done')
tp = create_tp(lp={
'launch_presentation_return_url': 'http://foo.edu/done',
'lti_errormsg': 'user error',
'lti_errorlog': 'lms error',
'lti_msg': 'user message',
'lti_log': 'lms message'
})
return_url = tp.build_return_url()
parsed = urlsplit(return_url)
self.assertEqual(parsed.hostname, 'foo.edu')
self.assertEqual(parsed.path, '/done')
self.assertEqual(parse_qs(parsed.query), {
'lti_errormsg': 'user error',
'lti_errorlog': 'lms error',
'lti_msg': 'user message',
'lti_log': 'lms message'
})
def test_username(self):
'''
Should find the best username.
'''
tp = create_tp()
self.assertEqual(tp.username('guy'), 'guy')
tp.lis_person_name_full = 'full'
self.assertEqual(tp.username('guy'), 'full')
tp.lis_person_name_family = 'family'
self.assertEqual(tp.username('guy'), 'family')
tp.lis_person_name_given = 'given'
self.assertEqual(tp.username('guy'), 'given')
def test_new_request(self):
key = generate_client_id()
secret = generate_token()
lp = {
'lti_version': 'foo',
'lti_message_type': 'bar',
'resource_link_id': 123
}
tp = create_tp(key, secret, lp)
req = tp.new_request({})
self.assertIsInstance(req, OutcomeRequest)
self.assertEqual(req, tp._last_outcome_request)
self.assertEqual(req.consumer_key, key)
self.assertEqual(len(tp.outcome_requests), 1)
# outcome request should get assigned attr
req = tp.new_request({'score': 1.0})
self.assertEqual(req.score, 1.0)
self.assertEqual(len(tp.outcome_requests), 2)
# but can't override some fields
req = tp.new_request({'consumer_key': 'foo'})
self.assertEqual(req.consumer_key, key)
self.assertEqual(len(tp.outcome_requests), 3)
# should fail if we use an invalid opt
self.assertRaises(InvalidLTIConfigError, tp.new_request, {'foo': 1})
self.assertEqual(len(tp.outcome_requests), 3)
def test_last_outcome_success(self):
tp = create_tp()
mock = Mock()
mock.was_outcome_post_successful.return_value = True
tp._last_outcome_request = mock
self.assertTrue(tp.last_outcome_success())
def test_last_outcome_request(self):
tp = create_tp()
tp.outcome_requests = ['foo', 'bar']
self.assertEqual(tp.last_outcome_request(), 'bar')
# mock the django.shortcuts import to allow testing
mock = Mock()
mock.shortcuts.redirect.return_value = 'foo'
mock_modules = {
'django': mock,
'django.shortcuts': mock.shortcuts
}
class TestDjangoToolProvider(unittest.TestCase):
@patch.dict('sys.modules', mock_modules)
def test_from_django_request(self):
from lti.contrib.django import DjangoToolProvider
secret = generate_token()
mock_req = Mock()
mock_req.POST = {'oauth_consumer_key': 'foo'}
mock_req.META = {'CONTENT_TYPE': 'bar'}
mock_req.build_absolute_uri.return_value = 'http://example.edu/foo/bar'
tp = DjangoToolProvider.from_django_request(secret, mock_req)
self.assertEqual(tp.consumer_key, 'foo')
self.assertEqual(tp.launch_headers['CONTENT_TYPE'], 'bar')
self.assertEqual(tp.launch_url, 'http://example.edu/foo/bar')
@patch.dict('sys.modules', mock_modules)
def test_request_required(self):
from lti.contrib.django import DjangoToolProvider
with self.assertRaises(ValueError):
DjangoToolProvider.from_django_request()
@patch.dict('sys.modules', mock_modules)
def test_secret_not_required(self):
from lti.contrib.django import DjangoToolProvider
mock_req = Mock()
mock_req.POST = {'oauth_consumer_key': 'foo'}
mock_req.META = {'CONTENT_TYPE': 'bar'}
mock_req.build_absolute_uri.return_value = 'http://example.edu/foo/bar'
tp = DjangoToolProvider.from_django_request(request=mock_req)
self.assertEqual(tp.consumer_key, 'foo')
self.assertEqual(tp.launch_headers['CONTENT_TYPE'], 'bar')
self.assertEqual(tp.launch_url, 'http://example.edu/foo/bar')
@patch.dict('sys.modules', mock_modules)
def test_success_redirect(self):
from lti.contrib.django import DjangoToolProvider
tp = create_tp(lp={
'launch_presentation_return_url': 'http://example.edu/foo'
}, tp_class=DjangoToolProvider)
redirect_retval = tp.success_redirect(msg='bar', log='baz')
self.assertEqual(redirect_retval, 'foo')
redirect_url, = mock.shortcuts.redirect.call_args[0]
parsed = urlsplit(redirect_url)
self.assertEqual(parse_qs(parsed.query), {
'lti_msg': 'bar',
'lti_log': 'baz'
})
@patch.dict('sys.modules', mock_modules)
def test_error_redirect(self):
from lti.contrib.django import DjangoToolProvider
tp = create_tp(lp={
'launch_presentation_return_url': 'http://example.edu/bar'
}, tp_class=DjangoToolProvider)
redirect_retval = tp.error_redirect(errormsg='abcd', errorlog='efgh')
self.assertEqual(redirect_retval, 'foo')
redirect_url, = mock.shortcuts.redirect.call_args[0]
parsed = urlsplit(redirect_url)
self.assertEqual(parse_qs(parsed.query), {
'lti_errormsg': 'abcd',
'lti_errorlog': 'efgh'
})
class TestFlaskToolProvider(unittest.TestCase):
def test_from_flask_request(self):
from lti.contrib.flask import FlaskToolProvider
secret = generate_token()
mock_req = Mock()
mock_req.form = {'oauth_consumer_key': 'foo'}
mock_req.headers = {'Content-type': 'bar'}
mock_req.url = 'http://example.edu/foo/bar'
tp = FlaskToolProvider.from_flask_request(secret, mock_req)
self.assertEqual(tp.consumer_key, 'foo')
self.assertEqual(tp.launch_headers['Content-type'], 'bar')
self.assertEqual(tp.launch_url, 'http://example.edu/foo/bar')
def test_request_required(self):
from lti.contrib.flask import FlaskToolProvider
with self.assertRaises(ValueError):
FlaskToolProvider.from_flask_request()
def test_secret_not_required(self):
from lti.contrib.flask import FlaskToolProvider
mock_req = Mock()
mock_req.form = {'oauth_consumer_key': 'foo'}
mock_req.headers = {'Content-type': 'bar'}
mock_req.url = 'http://example.edu/foo/bar'
tp = FlaskToolProvider.from_flask_request(request=mock_req)
self.assertEqual(tp.consumer_key, 'foo')
self.assertEqual(tp.launch_headers['Content-type'], 'bar')
self.assertEqual(tp.launch_url, 'http://example.edu/foo/bar')
|
py | b4126e5a2fb487d12aa0218f7d90f22d6985006a | """Testing doc-process endpoint."""
from typing import Callable
from fastapi.testclient import TestClient
from phrase_api.routers.http_doc_processor import router
client = TestClient(router)
def test_simple_router(clean_collection: Callable[[], None]) -> None:
"""Simple test for checking router functionality."""
sample_payload = {"document": "<p>sample</p>"}
response = client.post(
"http://127.0.0.1:8000/api/doc-process/?doc_type=HTML", json=sample_payload
)
assert response.status_code == 201
def test_with_sample_page(test_page: str) -> None:
"""Testing with sample HTML page."""
sample_payload = {"document": test_page}
response = client.post(
"http://127.0.0.1:8000/api/doc-process/?doc_type=HTML", json=sample_payload
)
assert response.status_code == 201
|
py | b412700d2ff6f5c9f1b76f69db6631232a7913d3 | """
URLconf for registration and activation, using django-registration's
one-step backend.
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration::
(r'^accounts/', include('registration.backends.simple.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
If you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead.
"""
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.views.generic import TemplateView
from registration.views import register
from registration_email.forms import EmailRegistrationForm
urlpatterns = patterns('',
# django-registration views
url(r'^register/$',
register,
{'backend': 'registration.backends.simple.SimpleBackend',
'template_name': 'registration/registration_form.html',
'form_class': EmailRegistrationForm,
'success_url': getattr(
settings, 'REGISTRATION_EMAIL_REGISTER_SUCCESS_URL', None),
},
name='registration_register',
),
url(r'^register/closed/$',
TemplateView.as_view(
template_name='registration/registration_closed.html'),
name='registration_disallowed',
),
# django auth urls
(r'', include('registration_email.auth_urls')),
)
|
py | b412717fed35be55a03fe7ed04b5f78e3da0766f | from conans import AutoToolsBuildEnvironment, tools, ConanFile
from conans.errors import ConanInvalidConfiguration
import os
class LibPcapConan(ConanFile):
name = "libpcap"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/the-tcpdump-group/libpcap"
description = "libpcap is an API for capturing network traffic"
license = "BSD-3-Clause"
topics = ("networking", "pcap", "sniffing", "network-traffic")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"enable_libusb": [True, False],
"enable_universal": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"enable_libusb": False,
"enable_universal": True
}
_autotools = None
# TODO: Add dbus-glib when available
# TODO: Add libnl-genl when available
# TODO: Add libbluetooth when available
# TODO: Add libibverbs when available
@property
def _source_subfolder(self):
return "source_subfolder"
def requirements(self):
if self.options.enable_libusb:
self.requires("libusb/1.0.23")
def build_requirements(self):
if self.settings.os == "Linux":
self.build_requires("bison/3.7.1")
self.build_requires("flex/2.6.4")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_folder = self.name + "-" + self.name + "-" + self.version
os.rename(extracted_folder, self._source_subfolder)
def configure(self):
if self.settings.os == "Macos" and self.options.shared:
raise ConanInvalidConfiguration("libpcap can not be built as shared on OSX.")
if self.settings.os == "Windows":
raise ConanInvalidConfiguration("libpcap is not supported on Windows.")
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def _configure_autotools(self):
if not self._autotools:
self._autotools = AutoToolsBuildEnvironment(self)
configure_args = ["--enable-shared" if self.options.shared else "--disable-shared"]
configure_args.append("--disable-universal" if not self.options.enable_universal else "")
configure_args.append("--enable-usb" if self.options.enable_libusb else "--disable-usb")
configure_args.extend([
"--without-libnl",
"--disable-bluetooth",
"--disable-packet-ring",
"--disable-dbus",
"--disable-rdma"
])
if tools.cross_building(self.settings):
target_os = "linux" if self.settings.os == "Linux" else "null"
configure_args.append("--with-pcap=%s" % target_os)
elif "arm" in self.settings.arch and self.settings.os == "Linux":
configure_args.append("--host=arm-linux")
self._autotools.configure(args=configure_args, configure_dir=self._source_subfolder)
return self._autotools
def build(self):
autotools = self._configure_autotools()
autotools.make()
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
autotools = self._configure_autotools()
autotools.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
if self.options.shared:
os.remove(os.path.join(self.package_folder, "lib", "libpcap.a"))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
|
py | b41271ee91e8f12de897fbf1f44461fa95689d5b | """ Define Message base class.
https://github.com/hyperledger/aries-rfcs/tree/master/concepts/0020-message-types
https://github.com/hyperledger/aries-rfcs/tree/master/concepts/0008-message-id-and-threading
"""
import json
import uuid
from sirius_sdk.errors.exceptions import *
from sirius_sdk.messaging.type import Type, Semver
# Registry for restoring message instance from payload
MSG_REGISTRY = {}
def generate_id():
""" Generate a message id. """
return str(uuid.uuid4())
class Message(dict):
""" Message base class.
Inherits from dict meaning it behaves like a dictionary.
"""
__slots__ = (
'mtc',
'_type'
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if '@type' not in self:
raise SiriusInvalidMessage('No @type in message')
if '@id' not in self:
self['@id'] = generate_id()
elif not isinstance(self['@id'], str):
raise SiriusInvalidMessage('Message @id is invalid; must be str')
if isinstance(self['@type'], Type):
self._type = self['@type']
self['@type'] = str(self._type)
else:
self._type = Type.from_str(self.type)
@property
def type(self):
""" Shortcut for msg['@type'] """
return self['@type']
@property
def id(self): # pylint: disable=invalid-name
""" Shortcut for msg['@id'] """
return self['@id']
@property
def doc_uri(self) -> str:
""" Get type doc_uri """
return self._type.doc_uri
@property
def protocol(self) -> str:
""" Get type protocol """
return self._type.protocol
@property
def version(self) -> str:
""" Get type version """
return self._type.version
@property
def version_info(self) -> Semver:
""" Get type version info """
return self._type.version_info
@property
def name(self) -> str:
""" Get type name """
return self._type.name
@property
def normalized_version(self) -> str:
""" Get type normalized version """
return str(self._type.version_info)
# Serialization
@classmethod
def deserialize(cls, serialized: str):
""" Deserialize a message from a json string. """
try:
return cls(json.loads(serialized))
except json.decoder.JSONDecodeError as err:
raise SiriusInvalidMessage('Could not deserialize message') from err
def serialize(self):
""" Serialize a message into a json string. """
return json.dumps(self)
def pretty_print(self):
""" return a 'pretty print' representation of this message. """
return json.dumps(self, indent=2)
def __eq__(self, other):
if not isinstance(other, Message):
return False
return super().__eq__(other)
def __hash__(self):
return hash(self.id)
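# Example round-trip (a sketch; assumes the '@type' URI below is in a form
# that Type.from_str accepts):
#   msg = Message({'@type': 'https://didcomm.org/trust_ping/1.0/ping'})
#   assert Message.deserialize(msg.serialize()) == msg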
def register_message_class(cls, protocol: str, name: str=None):
if issubclass(cls, Message):
descriptor = MSG_REGISTRY.get(protocol, {})
if name:
descriptor[name] = cls
else:
descriptor['*'] = cls
MSG_REGISTRY[protocol] = descriptor
else:
raise SiriusInvalidMessageClass()
def restore_message_instance(payload: dict) -> (bool, Message):
if '@type' in payload:
typ = Type.from_str(payload['@type'])
descriptor = MSG_REGISTRY.get(typ.protocol, None)
if descriptor:
if typ.name in descriptor:
cls = descriptor[typ.name]
elif '*' in descriptor:
cls = descriptor['*']
else:
cls = None
else:
cls = None
if cls is not None:
return True, cls(**payload)
else:
return False, None
else:
return False, None
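# Example use of the registry (a sketch; the exact '@type' URI format accepted
# by Type.from_str is an assumption here):
#   class Ping(Message):
#       pass
#   register_message_class(Ping, protocol='trust_ping', name='ping')
#   ok, msg = restore_message_instance(
#       {'@type': 'https://didcomm.org/trust_ping/1.0/ping'})
#   # ok is True and msg is a Ping once the protocol/name pair is registered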
|
py | b41271f9970524906a31ec59346f532c9c7a1818 | from typing import Any, Dict, List, Optional
from rotkehlchen.assets.asset import Asset
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.db.settings import ModifiableDBSettings
from rotkehlchen.db.utils import BlockchainAccounts
from rotkehlchen.tests.utils.constants import DEFAULT_TESTS_MAIN_CURRENCY
from rotkehlchen.typing import (
ApiKey,
BlockchainAccountData,
ExternalService,
ExternalServiceApiCredentials,
SupportedBlockchain,
)
def maybe_include_etherscan_key(db: DBHandler, include_etherscan_key: bool) -> None:
if not include_etherscan_key:
return
# Add the tests only etherscan API key
db.add_external_service_credentials([ExternalServiceApiCredentials(
service=ExternalService.ETHERSCAN,
api_key=ApiKey('8JT7WQBB2VQP5C3416Y8X3S8GBA3CVZKP4'),
)])
def add_blockchain_accounts_to_db(db: DBHandler, blockchain_accounts: BlockchainAccounts) -> None:
db.add_blockchain_accounts(
SupportedBlockchain.ETHEREUM,
[BlockchainAccountData(address=x) for x in blockchain_accounts.eth],
)
db.add_blockchain_accounts(
SupportedBlockchain.BITCOIN,
[BlockchainAccountData(address=x) for x in blockchain_accounts.btc],
)
def add_settings_to_test_db(
db: DBHandler,
db_settings: Optional[Dict[str, Any]],
ignored_assets: Optional[List[Asset]],
) -> None:
settings = {
# DO not submit usage analytics during tests
'submit_usage_analytics': False,
'main_currency': DEFAULT_TESTS_MAIN_CURRENCY,
}
# Set the given db_settings. The pre-set values have priority unless overriden here
if db_settings is not None:
for key, value in db_settings.items():
settings[key] = value
db.set_settings(ModifiableDBSettings(**settings)) # type: ignore
if ignored_assets:
for asset in ignored_assets:
db.add_to_ignored_assets(asset)
|
py | b41272872a9a098592fbab409ddaf978d341f473 | import logging
import errno
from tempfile import (gettempdir, mkdtemp, NamedTemporaryFile)
import shutil
from subprocess import Popen, STDOUT, PIPE
from contextlib import contextmanager
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
@contextmanager
def TemporaryDirectory(suffix='', prefix=None, dir=None):
name = mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield name
finally:
try:
shutil.rmtree(name)
except OSError as exc:
# ENOENT - no such file or directory
if exc.errno != errno.ENOENT:
raise
class BashOperator(BaseOperator):
'''
Execute a Bash script, command or set of commands.
:param bash_command: The command, set of commands or reference to a
bash script (must be '.sh') to be executed.
:type bash_command: string
'''
template_fields = ('bash_command',)
template_ext = ('.sh', '.bash',)
ui_color = '#f0ede4'
__mapper_args__ = {
'polymorphic_identity': 'BashOperator'
}
@apply_defaults
def __init__(self, bash_command, *args, **kwargs):
super(BashOperator, self).__init__(*args, **kwargs)
self.bash_command = bash_command
def execute(self, context):
'''
Execute the bash command in a temporary directory
which will be cleaned afterwards
'''
bash_command = self.bash_command
logging.info("tmp dir root location: \n"+ gettempdir())
with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f:
f.write(bash_command)
f.flush()
fname = f.name
script_location = fname
logging.info("Temporary script "
"location: {0}".format(script_location))
logging.info("Running command: " + bash_command)
sp = Popen(
['bash', fname],
stdout=PIPE, stderr=STDOUT,
cwd=tmp_dir)
self.sp = sp
logging.info("Output:")
for line in iter(sp.stdout.readline, ''):
logging.info(line.strip())
sp.wait()
logging.info("Command exited with "
"return code {0}".format(sp.returncode))
if sp.returncode:
raise Exception("Bash command failed")
def on_kill(self):
logging.info('Sending SIGTERM signal to bash subprocess')
self.sp.terminate()
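# Typical usage in a DAG definition (a sketch; assumes a `dag` object exists
# in the surrounding module):
#   t1 = BashOperator(
#       task_id='print_date',
#       bash_command='date',
#       dag=dag)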
|
py | b412730f49875a0c9452c6b1986f0c4c1cbc2321 | import helperstuff
import inspect
import os
import sys
KNOWN_CONSTRUCTORS = {}
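# Maps id(instance) -> the argument source strings captured when that
# instance's __init__ ran, so handle_value() can emit code that re-creates
# the object inside a generated test.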
def handle_value(val, imports):
if val is None:
return "None"
if type(val) == dict:
return {k: handle_value(v, imports) for k, v in val.items()}
if type(val) == list:
return "[" + ", ".join([handle_value(v, imports) for v in val]) + "]"
if type(val) == tuple:
return "[" + ", ".join([handle_value(v, imports) for v in val]) + "]"
if type(val) in (int, float, bool):
return str(val)
if type(val) == str:
return "'{}'".format(val)
if hasattr(val, '__class__'):
imports.append("import {}".format(val.__class__.__module__))
if id(val) in KNOWN_CONSTRUCTORS.keys():
arg_data = KNOWN_CONSTRUCTORS[id(val)]
return "{}.{}(*{}, **{})".format(
val.__class__.__module__, val.__class__.__name__,
arg_data["args"], arg_data["kwargs"])
else:
return "{}.{}()".format(val.__class__.__module__,
val.__class__.__name__)
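# A few example renderings (a sketch of expected output):
#   handle_value(5, [])        -> '5'
#   handle_value('a', [])      -> "'a'"
#   handle_value([1, 'a'], []) -> "[1, 'a']"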
def main():
def method_wrapper_outer(fnc, module_title, class_name, test_path):
def method_wrapper(*args, **kwargs):
imports = []
in_kwargs = handle_value(kwargs, imports)
if fnc.__name__ == "__init__":
in_args = handle_value(args[1:], imports)
KNOWN_CONSTRUCTORS[id(args[0])] = {
"args": in_args,
"kwargs": in_kwargs,
}
else:
in_args = handle_value(args, imports)
test_path_parent = os.path.abspath(
os.path.join(test_path, os.pardir))
if not os.path.exists(test_path_parent):
os.makedirs(test_path_parent)
file_handle = open(test_path, "w+")
return_value = fnc(*args, **kwargs)
res = handle_value(return_value, imports)
file_handle.write("""import unittest
from {} import {}
{}
class Test{}(unittest.TestCase):
def test_{}(self):
output = {}.{}(*{}, **{})
assert output == {}
if __name__ == '__main__':
unittest.main()
""".format(module_title, class_name, "\n".join(imports), class_name,
fnc.__name__, class_name, fnc.__name__, in_args, in_kwargs, res))
return return_value
return method_wrapper
def filter_methods(cls):
return [x for x in cls.__dict__.keys() if callable(getattr(cls, x))]
for module_title, module_obj in sys.modules.items():
if not inspect.ismodule(module_obj) or not hasattr(
module_obj, "__file__") or not module_obj.__file__.startswith(
os.getcwd()):
continue
test_path = os.path.join(
"", #"test",
os.path.relpath(module_obj.__file__, os.getcwd()))
test_path = os.path.join(
os.path.abspath(os.path.join(test_path, os.pardir)),
"test_" + os.path.split(test_path)[1])
module_members = dict(inspect.getmembers(module_obj))
for class_name, class_obj in module_members.items():
if not inspect.isclass(class_obj):
continue
method_names = filter_methods(class_obj)
for method_name in method_names:
func = getattr(class_obj, method_name)
setattr(class_obj, method_name,
method_wrapper_outer(func, module_title, class_name,
test_path))
helperstuff.HelperClass(8).my_func([55])
if __name__ == '__main__':
main()
|
py | b412733d16c4d5356795d1663fc3019a456eaf4b | from autode.wrappers.NWChem import NWChem
from autode.calculation import Calculation
from autode.species.molecule import Molecule
from autode.wrappers.keywords import OptKeywords
from autode.atoms import Atom
from autode.config import Config
from . import testutils
import numpy as np
import os
here = os.path.dirname(os.path.abspath(__file__))
test_mol = Molecule(name='methane', smiles='C')
method = NWChem()
Config.keyword_prefixes = False
opt_keywords = OptKeywords(['driver\n gmax 0.002\n grms 0.0005\n'
' xmax 0.01\n xrms 0.007\n eprec 0.00003\nend',
'basis\n * library Def2-SVP\nend',
'dft\n xc xpbe96 cpbe96\nend',
'task dft optimize'])
@testutils.work_in_zipped_dir(os.path.join(here, 'data', 'nwchem.zip'))
def test_opt_calc():
calc = Calculation(name='opt', molecule=test_mol, method=method,
keywords=opt_keywords)
calc.run()
assert os.path.exists('opt_nwchem.nw')
assert os.path.exists('opt_nwchem.out')
final_atoms = calc.get_final_atoms()
assert len(final_atoms) == 5
assert type(final_atoms[0]) is Atom
assert -40.4165 < calc.get_energy() < -40.4164
assert calc.output.exists()
assert calc.output.file_lines is not None
assert calc.get_imaginary_freqs() == []
assert calc.input.filename == 'opt_nwchem.nw'
assert calc.output.filename == 'opt_nwchem.out'
assert calc.terminated_normally()
assert calc.optimisation_converged()
assert calc.optimisation_nearly_converged() is False
charges = calc.get_atomic_charges()
assert len(charges) == 5
assert all(-1.0 < c < 1.0 for c in charges)
# Optimisation should result in small gradients
gradients = calc.get_gradients()
assert len(gradients) == 5
assert all(-0.1 < np.linalg.norm(g) < 0.1 for g in gradients)
def test_opt_single_atom():
h = Molecule(name='H', smiles='[H]')
calc = Calculation(name='opt_h', molecule=h, method=method,
keywords=opt_keywords)
calc.generate_input()
# Can't do an optimisation of a hydrogen atom..
assert os.path.exists('opt_h_nwchem.nw')
input_lines = open('opt_h_nwchem.nw', 'r').readlines()
assert 'opt' not in [keyword.lower() for keyword in input_lines[0].split()]
os.remove('opt_h_nwchem.nw')
@testutils.work_in_zipped_dir(os.path.join(here, 'data', 'nwchem.zip'))
def test_opt_hf_constraints():
keywords = OptKeywords(['driver\n gmax 0.002\n grms 0.0005\n'
' xmax 0.01\n xrms 0.007\n eprec 0.00003\nend',
'basis\n * library Def2-SVP\nend',
'task scf optimize'])
h2o = Molecule(name='water', smiles='O')
calc = Calculation(name='opt_water', molecule=h2o, method=method,
keywords=keywords,
cartesian_constraints=[0],
distance_constraints={(0, 1): 0.95})
calc.run()
h2o.atoms = calc.get_final_atoms()
assert 0.94 < h2o.distance(0, 1) < 0.96
|
py | b41274042b11de82ec880d02ab7f6496f5358993 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import TYPE_CHECKING
from .._internal.managed_identity_base import AsyncManagedIdentityBase
from .._internal.managed_identity_client import AsyncManagedIdentityClient
from ..._credentials.app_service import _get_client_args
if TYPE_CHECKING:
from typing import Any, Optional
class AppServiceCredential(AsyncManagedIdentityBase):
def get_client(self, **kwargs: "Any") -> "Optional[AsyncManagedIdentityClient]":
client_args = _get_client_args(**kwargs)
if client_args:
return AsyncManagedIdentityClient(**client_args)
return None
def get_unavailable_message(self) -> str:
return "App Service managed identity configuration not found in environment"
|
py | b41275d4b82a4e16eb0c5e8cbb994f2dff541520 | import psycopg2
import os
import json
from datetime import datetime as dt
from dotenv import load_dotenv
load_dotenv()
# connect to PostgreSQL
conn = psycopg2.connect(dbname=os.getenv('DB_NAME'), user=os.getenv('DB_USER'), password=os.getenv('DB_PASS'), host=os.getenv('DB_HOST'), port=os.getenv('DB_PORT'))
t = open('tenki.json', 'r', encoding='utf-8')
tenki_dic = json.load(t)
timeDefines = []
# execute sql
cur = conn.cursor()
print(cur)
for n in range(7):
rd = tenki_dic[0]['week']['reportDatetime']
reportDatetime = dt.strptime(rd, '%Y-%m-%dT%H:%M:%S+09:00')
td = tenki_dic[0]['week']['timeSeries'][0]['timeDefines'][n]
timeDefines = dt.strptime(td, '%Y-%m-%dT%H:%M:%S+09:00')
weatherCodes = tenki_dic[0]['week']['timeSeries'][0]['areas']['weatherCodes'][n]
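# Upsert: insert one row per forecast day keyed on date_define; if the row
# already exists, refresh report_datetime/weather_code and stamp updated_at.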
cur.execute('''
INSERT INTO kushiro (report_datetime, date_define, weather_code)
VALUES (%s, %s, %s)
ON CONFLICT (date_define)
DO UPDATE SET (report_datetime, weather_code, updated_at)
= ROW(EXCLUDED.report_datetime, EXCLUDED.weather_code, %s);
''', [reportDatetime, timeDefines, weatherCodes, dt.now()])
conn.commit()
#cur.execute('INSERT INTO kushiro (date_define, weather_code) VALUES (%s, %s)', [datetime.datetime.now(), '200'])
#cur.execute('INSERT INTO kushiro (weather_code) VALUES (%s);', 200)
cur.execute("SELECT * FROM kushiro;")
print(cur)
hey = cur.fetchall()
print(hey)
cur.close()
conn.close()
|
py | b41276410a633e32ee7fd6c60b8649ee18ecef61 | from airflow.hooks.base_hook import BaseHook
try:
snakebite_imported = True
from snakebite.client import Client, HAClient, Namenode
except ImportError:
snakebite_imported = False
from airflow.utils import AirflowException
class HDFSHookException(AirflowException):
pass
class HDFSHook(BaseHook):
'''
Interact with HDFS. This class is a wrapper around the snakebite library.
'''
def __init__(self, hdfs_conn_id='hdfs_default'):
if not snakebite_imported:
raise ImportError(
'This HDFSHook implementation requires snakebite, but '
'snakebite is not compatible with Python 3 '
'(as of August 2015). Please use Python 2 if you require '
'this hook -- or help by submitting a PR!')
self.hdfs_conn_id = hdfs_conn_id
def get_conn(self):
'''
Returns a snakebite HDFSClient object.
'''
connections = self.get_connections(self.hdfs_conn_id)
client = None
if len(connections) == 1:
client = Client(connections[0].host, connections[0].port)
elif len(connections) > 1:
nn = [Namenode(conn.host, conn.port) for conn in connections]
client = HAClient(nn)
else:
raise HDFSHookException("conn_id doesn't exist in the repository")
return client
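# Typical usage (a sketch; assumes an 'hdfs_default' connection is configured
# and that snakebite's ls() yields dicts with a 'path' key):
#   client = HDFSHook().get_conn()
#   for entry in client.ls(['/']):
#       print(entry['path'])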
|
py | b4127688e20e0659e99f290896262714537e1298 | from enum import Enum, IntEnum
SHA1_USA = 'b4bd50e4131b027c334547b4524e2dbbd4227130'
SHA1_DEMO = '63fcad218f9047b6a9edbb68c98bd0dec322d7a1'
SHA1_EU = 'cff199b36ff173fb6faf152653d1bccf87c26fb7'
SHA1_JP = '6c5404a1effb17f481f352181d0f1c61a2765c5d'
SHA1_DEMO_JP = '9cdb56fa79bba13158b81925c1f3641251326412'
ROM_OFFSET = 0x08000000
ROM_SIZE = 0xffffff
# Needs to be a str enum, so it can be handled by dataclass-csv
class RomVariant(str, Enum):
USA = 'USA'
DEMO = 'DEMO'
EU = 'EU'
JP = 'JP'
DEMO_JP = 'DEMO_JP'
CUSTOM = 'CUSTOM'
CUSTOM_EU = 'CUSTOM_EU'
CUSTOM_JP = 'CUSTOM_JP'
CUSTOM_DEMO_USA = 'CUSTOM_DEMO_USA'
CUSTOM_DEMO_JP = 'CUSTOM_DEMO_JP'
def __repr__(self):
return self.name
ALL_ROM_VARIANTS = [RomVariant.USA, RomVariant.DEMO, RomVariant.EU, RomVariant.JP, RomVariant.DEMO_JP, RomVariant.CUSTOM, RomVariant.CUSTOM_EU, RomVariant.CUSTOM_JP, RomVariant.CUSTOM_DEMO_USA, RomVariant.CUSTOM_DEMO_JP]
CUSTOM_ROM_VARIANTS = [RomVariant.CUSTOM, RomVariant.CUSTOM_EU, RomVariant.CUSTOM_JP, RomVariant.CUSTOM_DEMO_USA, RomVariant.CUSTOM_DEMO_JP]
|
py | b41278ad018a7ab1d05bbefee0bf46b304c90efd | # -*- coding:iso-8859-1 -*-
"""
This module offers a generic date/time string parser which is able to parse
most known formats to represent a date and/or time.
This module attempts to be forgiving with regards to unlikely input formats,
returning a datetime object even for dates which are ambiguous. If an element
of a date/time stamp is omitted, the following rules are applied:
- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour
on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
specified.
- If a time zone is omitted, a timezone-naive datetime is returned.
If any other elements are missing, they are taken from the
:class:`datetime.datetime` object passed to the parameter ``default``. If this
results in a day number exceeding the valid number of days per month, the
value falls back to the end of the month.
Additional resources about date/time string formats can be found below:
- `A summary of the international standard date and time notation
<http://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
- `W3C Date and Time Formats <http://www.w3.org/TR/NOTE-datetime>`_
- `Time Formats (Planetary Rings Node) <http://pds-rings.seti.org/tools/time_formats.html>`_
- `CPAN ParseDate module
<http://search.cpan.org/~muir/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
- `Java SimpleDateFormat Class
<https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
"""
from __future__ import unicode_literals
import datetime
import string
import time
import collections
import re
from io import StringIO
from calendar import monthrange, isleap
from six import text_type, binary_type, integer_types
from . import relativedelta
from . import tz
__all__ = ["parse", "parserinfo"]
class _timelex(object):
# Fractional seconds are sometimes split by a comma
_split_decimal = re.compile(r"([\.,])")
def __init__(self, instream):
if isinstance(instream, binary_type):
instream = instream.decode()
if isinstance(instream, text_type):
instream = StringIO(instream)
if getattr(instream, 'read', None) is None:
raise TypeError('Parser must be a string or character stream, not '
'{itype}'.format(itype=instream.__class__.__name__))
self.instream = instream
self.charstack = []
self.tokenstack = []
self.eof = False
def get_token(self):
"""
This function breaks the time string into lexical units (tokens), which
can be parsed by the parser. Lexical units are demarcated by changes in
the character set, so any continuous string of letters is considered
one unit, and any continuous string of numbers is considered one unit.
The main complication arises from the fact that dots ('.') can be used
both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
"4:30:21.447"). As such, it is necessary to read the full context of
any dot-separated strings before breaking them into tokens; this
function maintains a "token stack", for when the ambiguous context
demands that multiple tokens be parsed at once.
"""
if self.tokenstack:
return self.tokenstack.pop(0)
seenletters = False
token = None
state = None
while not self.eof:
# We only realize that we've reached the end of a token when we
# find a character that's not part of the current token - since
# that character may be part of the next token, it's stored in the
# charstack.
if self.charstack:
nextchar = self.charstack.pop(0)
else:
nextchar = self.instream.read(1)
while nextchar == '\x00':
nextchar = self.instream.read(1)
if not nextchar:
self.eof = True
break
elif not state:
# First character of the token - determines if we're starting
# to parse a word, a number or something else.
token = nextchar
if self.isword(nextchar):
state = 'a'
elif self.isnum(nextchar):
state = '0'
elif self.isspace(nextchar):
token = ' '
break # emit token
else:
break # emit token
elif state == 'a':
# If we've already started reading a word, we keep reading
# letters until we find something that's not part of a word.
seenletters = True
if self.isword(nextchar):
token += nextchar
elif nextchar == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0':
# If we've already started reading a number, we keep reading
# numbers until we find something that doesn't fit.
if self.isnum(nextchar):
token += nextchar
elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == 'a.':
# If we've seen some letters and a dot separator, continue
# parsing, and the tokens will be broken up later.
seenletters = True
if nextchar == '.' or self.isword(nextchar):
token += nextchar
elif self.isnum(nextchar) and token[-1] == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0.':
# If we've seen at least one dot separator, keep going, we'll
# break up the tokens later.
if nextchar == '.' or self.isnum(nextchar):
token += nextchar
elif self.isword(nextchar) and token[-1] == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
token[-1] in '.,')):
l = self._split_decimal.split(token)
token = l[0]
for tok in l[1:]:
if tok:
self.tokenstack.append(tok)
if state == '0.' and token.count('.') == 0:
token = token.replace(',', '.')
return token
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def next(self):
return self.__next__() # Python 2.x support
@classmethod
def split(cls, s):
return list(cls(s))
@classmethod
def isword(cls, nextchar):
""" Whether or not the next character is part of a word """
return nextchar.isalpha()
@classmethod
def isnum(cls, nextchar):
""" Whether the next character is part of a number """
return nextchar.isdigit()
@classmethod
def isspace(cls, nextchar):
""" Whether the next character is whitespace """
return nextchar.isspace()
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __len__(self):
return (sum(getattr(self, attr) is not None
for attr in self.__slots__))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
"""
Class which handles what inputs are accepted. Subclass this to customize
the language and acceptable values for each parameter.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. Default is ``False``.
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
Default is ``False``.
"""
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"),
("Wed", "Wednesday"),
("Thu", "Thursday"),
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"),
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "Sept", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z"]
PERTAIN = ["of"]
TZOFFSET = {}
def __init__(self, dayfirst=False, yearfirst=False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year // 100 * 100
def _convert(self, lst):
dct = {}
for i, v in enumerate(lst):
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
if len(name) >= 3:
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
if len(name) >= 3:
try:
return self._months[name.lower()] + 1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year, century_specified=False):
if year < 100 and not century_specified:
year += self._century
if abs(year - self._year) >= 50:
if year < self._year:
year += 100
else:
year -= 100
return year
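# Illustrative values (assuming the module was loaded in 2015, so
# self._year == 2015 and self._century == 2000): convertyear(5) -> 2005,
# convertyear(70) -> 1970 (2070 lies 50+ years away, so a century is
# subtracted), while convertyear(5, century_specified=True) -> 5 unchanged.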
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year, res.century_specified)
if (res.tzoffset == 0 and not res.tzname) or res.tzname == 'Z':
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
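# A minimal localization sketch (hypothetical subclass, not part of this
# module): the intended extension point is to override the class-level
# tables, e.g.
#
#   class SpanishParserInfo(parserinfo):
#       MONTHS = [("Ene", "Enero"), ("Feb", "Febrero"), ("Mar", "Marzo"),
#                 ("Abr", "Abril"), ("May", "Mayo"), ("Jun", "Junio"),
#                 ("Jul", "Julio"), ("Ago", "Agosto"), ("Sep", "Septiembre"),
#                 ("Oct", "Octubre"), ("Nov", "Noviembre"), ("Dic", "Diciembre")]
#
# so that parser(SpanishParserInfo()).parse("3 Enero 2014") resolves the
# month name through the customized table.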
class _ymd(list):
def __init__(self, tzstr, *args, **kwargs):
super(_ymd, self).__init__(*args, **kwargs)  # not self.__class__: that recurses under subclassing
self.century_specified = False
self.tzstr = tzstr
@staticmethod
def token_could_be_year(token, year):
try:
return int(token) == year
except ValueError:
return False
@staticmethod
def find_potential_year_tokens(year, tokens):
return [token for token in tokens if _ymd.token_could_be_year(token, year)]
def find_probable_year_index(self, tokens):
"""
Attempt to deduce whether a pre-100 year was lost
because its padded zeros were stripped off.
"""
for index, token in enumerate(self):
potential_year_tokens = _ymd.find_potential_year_tokens(token, tokens)
if len(potential_year_tokens) == 1 and len(potential_year_tokens[0]) > 2:
return index
def append(self, val):
if hasattr(val, '__len__'):
if val.isdigit() and len(val) > 2:
self.century_specified = True
elif val > 100:
self.century_specified = True
super(_ymd, self).append(int(val))  # not self.__class__: that recurses under subclassing
def resolve_ymd(self, mstridx, yearfirst, dayfirst):
len_ymd = len(self)
year, month, day = (None, None, None)
if len_ymd > 3:
raise ValueError("More than three YMD values")
elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
# One member, or two members with a month string
if mstridx != -1:
month = self[mstridx]
del self[mstridx]
if len_ymd > 1 or mstridx == -1:
if self[0] > 31:
year = self[0]
else:
day = self[0]
elif len_ymd == 2:
# Two members with numbers
if self[0] > 31:
# 99-01
year, month = self
elif self[1] > 31:
# 01-99
month, year = self
elif dayfirst and self[1] <= 12:
# 13-01
day, month = self
else:
# 01-13
month, day = self
elif len_ymd == 3:
# Three members
if mstridx == 0:
month, day, year = self
elif mstridx == 1:
if self[0] > 31 or (yearfirst and self[2] <= 31):
# 99-Jan-01
year, month, day = self
else:
# 01-Jan-01
# Give precedence to day-first, since
# two-digit years are usually hand-written.
day, month, year = self
elif mstridx == 2:
# Unusual ordering: the month string is in the last slot.
if self[1] > 31:
# 01-99-Jan
day, year, month = self
else:
# 99-01-Jan
year, day, month = self
else:
if self[0] > 31 or \
self.find_probable_year_index(_timelex.split(self.tzstr)) == 0 or \
(yearfirst and self[1] <= 12 and self[2] <= 31):
# 99-01-01
if dayfirst and self[2] <= 12:
year, day, month = self
else:
year, month, day = self
elif self[0] > 12 or (dayfirst and self[1] <= 12):
# 13-01-01
day, month, year = self
else:
# 01-13-01
month, day, year = self
return year, month, day
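# Resolution examples (hypothetical _ymd contents): with no month string
# (mstridx == -1) and members [10, 9, 2003] -- say, from "10.9.2003" -- the
# default resolves to (year, month, day) == (2003, 10, 9), while
# dayfirst=True flips the first two members to give (2003, 9, 10).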
class parser(object):
def __init__(self, info=None):
self.info = info or parserinfo()
def parse(self, timestr, default=None, ignoretz=False, tzinfos=None, **kwargs):
"""
Parse the date/time string into a :class:`datetime.datetime` object.
:param timestr:
Any date/time string using the supported formats.
:param default:
The default datetime object, if this is a datetime object and not
``None``, elements specified in ``timestr`` replace elements in the
default object.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a
naive :class:`datetime.datetime` object is returned.
:param tzinfos:
Additional time zone names / aliases which may be present in the
string. This argument maps time zone names (and optionally offsets
from those time zones) to time zones. This parameter can be a
dictionary with timezone aliases mapping time zone names to time
zones or a function taking two parameters (``tzname`` and
``tzoffset``) and returning a time zone.
The timezones to which the names are mapped can be an integer
offset from UTC in minutes or a :class:`tzinfo` object.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from dateutil.parser import parse
>>> from dateutil.tz import gettz
>>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21,
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
This parameter is ignored if ``ignoretz`` is set.
:param **kwargs:
Keyword arguments as passed to ``_parse()``.
:return:
Returns a :class:`datetime.datetime` object or, if the
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
first element being a :class:`datetime.datetime` object, the second
a tuple containing the fuzzy tokens.
:raises ValueError:
Raised for invalid or unknown string format, if the provided
:class:`tzinfo` is not in a valid format, or if an invalid date
would be created.
:raises TypeError:
Raised for non-string or character stream input.
:raises OverflowError:
Raised if the parsed date exceeds the largest valid C integer on
your system.
"""
if default is None:
effective_dt = datetime.datetime.now()
default = effective_dt.replace(hour=0, minute=0,
second=0, microsecond=0)
else:
effective_dt = default
res, skipped_tokens = self._parse(timestr, **kwargs)
if res is None:
raise ValueError("Unknown string format")
if len(res) == 0:
raise ValueError("String does not contain a date.")
repl = {}
for attr in ("year", "month", "day", "hour",
"minute", "second", "microsecond"):
value = getattr(res, attr)
if value is not None:
repl[attr] = value
if 'day' not in repl:
# If the default day exceeds the last day of the month, fall back to
# the end of the month.
cyear = default.year if res.year is None else res.year
cmonth = default.month if res.month is None else res.month
cday = default.day if res.day is None else res.day
if cday > monthrange(cyear, cmonth)[1]:
repl['day'] = monthrange(cyear, cmonth)[1]
ret = default.replace(**repl)
if res.weekday is not None and not res.day:
ret = ret+relativedelta.relativedelta(weekday=res.weekday)
if not ignoretz:
if (isinstance(tzinfos, collections.Callable) or
tzinfos and res.tzname in tzinfos):
if isinstance(tzinfos, collections.Callable):
tzdata = tzinfos(res.tzname, res.tzoffset)
else:
tzdata = tzinfos.get(res.tzname)
if isinstance(tzdata, datetime.tzinfo):
tzinfo = tzdata
elif isinstance(tzdata, text_type):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, integer_types):
tzinfo = tz.tzoffset(res.tzname, tzdata)
else:
raise ValueError("Offset must be tzinfo subclass, "
"tz string, or int offset.")
ret = ret.replace(tzinfo=tzinfo)
elif res.tzname and res.tzname in time.tzname:
ret = ret.replace(tzinfo=tz.tzlocal())
elif res.tzoffset == 0:
ret = ret.replace(tzinfo=tz.tzutc())
elif res.tzoffset:
ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
if kwargs.get('fuzzy_with_tokens', False):
return ret, skipped_tokens
else:
return ret
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset", "ampm"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
fuzzy_with_tokens=False):
"""
Private method which performs the heavy lifting of parsing, called from
``parse()``, which passes on its ``kwargs`` to this function.
:param timestr:
The string to parse.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. If set to ``None``, this value is retrieved from the
current :class:`parserinfo` object (which itself defaults to
``False``).
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
If this is set to ``None``, the value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param fuzzy:
Whether to allow fuzzy parsing, allowing for string like "Today is
January 1, 2047 at 8:21:00AM".
:param fuzzy_with_tokens:
If ``True``, ``fuzzy`` is automatically set to True, and the parser
will return a tuple where the first element is the parsed
:class:`datetime.datetime` timestamp and the second element is
a tuple containing the portions of the string which were ignored:
.. doctest::
>>> from dateutil.parser import parse
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
"""
if fuzzy_with_tokens:
fuzzy = True
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr) # Splits the timestr into tokens
# keep up with the last token skipped so we can recombine
# consecutively skipped tokens (-2 for when i begins at 0).
last_skipped_token_i = -2
skipped_tokens = list()
try:
# year/month/day list
ymd = _ymd(timestr)
# Index of the month string in ymd
mstridx = -1
len_l = len(l)
i = 0
while i < len_l:
# Check if it's a number
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
value = None
if value is not None:
# Token is a number
len_li = len(l[i])
i += 1
if (len(ymd) == 3 and len_li in (2, 4)
and res.hour is None and (i >= len_l or (l[i] != ':' and
info.hms(l[i]) is None))):
# 19990101T23[59]
s = l[i-1]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = l[i-1]
if not ymd and l[i-1].find('.') == -1:
#ymd.append(info.convertyear(int(s[:2])))
ymd.append(s[:2])
ymd.append(s[2:4])
ymd.append(s[4:])
else:
# 19990101T235959[.59]
res.hour = int(s[:2])
res.minute = int(s[2:4])
res.second, res.microsecond = _parsems(s[4:])
elif len_li in (8, 12, 14):
# YYYYMMDD
s = l[i-1]
ymd.append(s[:4])
ymd.append(s[4:6])
ymd.append(s[6:8])
if len_li > 8:
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li > 12:
res.second = int(s[12:])
elif ((i < len_l and info.hms(l[i]) is not None) or
(i+1 < len_l and l[i] == ' ' and
info.hms(l[i+1]) is not None)):
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
if l[i] == ' ':
i += 1
idx = info.hms(l[i])
while True:
if idx == 0:
res.hour = int(value)
if value % 1:
res.minute = int(60*(value % 1))
elif idx == 1:
res.minute = int(value)
if value % 1:
res.second = int(60*(value % 1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
if i >= len_l or idx == 2:
break
# 12h00
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
break
else:
i += 1
idx += 1
if i < len_l:
newidx = info.hms(l[i])
if newidx is not None:
idx = newidx
elif (i == len_l and l[i-2] == ' ' and
info.hms(l[i-3]) is not None):
# X h MM or X m SS
idx = info.hms(l[i-3]) + 1
if idx == 1:
res.minute = int(value)
if value % 1:
res.second = int(60*(value % 1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
elif i+1 < len_l and l[i] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
i += 1
value = float(l[i])
res.minute = int(value)
if value % 1:
res.second = int(60*(value % 1))
i += 1
if i < len_l and l[i] == ':':
res.second, res.microsecond = _parsems(l[i+1])
i += 2
elif i < len_l and l[i] in ('-', '/', '.'):
sep = l[i]
ymd.append(value_repr)
i += 1
if i < len_l and not info.jump(l[i]):
try:
# 01-01[-01]
ymd.append(l[i])
except ValueError:
# 01-Jan[-01]
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
else:
return None, None
i += 1
if i < len_l and l[i] == sep:
# We have three members
i += 1
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1  # a month string may only appear once
mstridx = len(ymd)-1
else:
ymd.append(l[i])
i += 1
elif i >= len_l or info.jump(l[i]):
if i+1 < len_l and info.ampm(l[i+1]) is not None:
# 12 am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i+1]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i+1]) == 0:
res.hour = 0
i += 1
else:
# Year, month or day
ymd.append(value)
i += 1
elif info.ampm(l[i]) is not None:
# 12am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i]) == 0:
res.hour = 0
i += 1
elif not fuzzy:
return None, None
else:
i += 1
continue
# Check weekday
value = info.weekday(l[i])
if value is not None:
res.weekday = value
i += 1
continue
# Check month name
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
i += 1
if i < len_l:
if l[i] in ('-', '/'):
# Jan-01[-99]
sep = l[i]
i += 1
ymd.append(l[i])
i += 1
if i < len_l and l[i] == sep:
# Jan-01-99
i += 1
ymd.append(l[i])
i += 1
elif (i+3 < len_l and l[i] == l[i+2] == ' '
and info.pertain(l[i+1])):
# Jan of 01
# In this case, 01 is clearly year
try:
value = int(l[i+3])
except ValueError:
# Wrong guess
pass
else:
# Convert it here to become unambiguous
ymd.append(str(info.convertyear(value)))
i += 4
continue
# Check am/pm
value = info.ampm(l[i])
if value is not None:
# For fuzzy parsing, 'a' or 'am' (both valid English words)
# may erroneously trigger the AM/PM flag. Deal with that
# here.
val_is_ampm = True
# If there's already an AM/PM flag, this one isn't one.
if fuzzy and res.ampm is not None:
val_is_ampm = False
# If AM/PM is found and hour is not, raise a ValueError
if res.hour is None:
if fuzzy:
val_is_ampm = False
else:
raise ValueError('No hour specified with ' +
'AM or PM flag.')
elif not 0 <= res.hour <= 12:
# If AM/PM is found, it's a 12 hour clock, so raise
# an error for invalid range
if fuzzy:
val_is_ampm = False
else:
raise ValueError('Invalid hour specified for ' +
'12-hour clock.')
if val_is_ampm:
if value == 1 and res.hour < 12:
res.hour += 12
elif value == 0 and res.hour == 12:
res.hour = 0
res.ampm = value
elif fuzzy:
last_skipped_token_i = self._skip_token(skipped_tokens,
last_skipped_token_i, i, l)
i += 1
continue
# Check for a timezone name
if (res.hour is not None and len(l[i]) <= 5 and
res.tzname is None and res.tzoffset is None and
not [x for x in l[i] if x not in
string.ascii_uppercase]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
i += 1
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i < len_l and l[i] in ('+', '-'):
l[i] = ('+', '-')[l[i] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
continue
# Check for a numbered timezone
if res.hour is not None and l[i] in ('+', '-'):
signal = (-1, 1)[l[i] == '+']
i += 1
len_li = len(l[i])
if len_li == 4:
# -0300
res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
elif i+1 < len_l and l[i+1] == ':':
# -03:00
res.tzoffset = int(l[i])*3600+int(l[i+2])*60
i += 2
elif len_li <= 2:
# -[0]3
res.tzoffset = int(l[i][:2])*3600
else:
return None, None
i += 1
res.tzoffset *= signal
# Look for a timezone name between parenthesis
if (i+3 < len_l and
info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
3 <= len(l[i+2]) <= 5 and
not [x for x in l[i+2]
if x not in string.ascii_uppercase]):
# -0300 (BRST)
res.tzname = l[i+2]
i += 4
continue
# Check jumps
if not (info.jump(l[i]) or fuzzy):
return None, None
last_skipped_token_i = self._skip_token(skipped_tokens,
last_skipped_token_i, i, l)
i += 1
# Process year/month/day
year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst)
if year is not None:
res.year = year
res.century_specified = ymd.century_specified
if month is not None:
res.month = month
if day is not None:
res.day = day
except (IndexError, ValueError, AssertionError):
return None, None
if not info.validate(res):
return None, None
if fuzzy_with_tokens:
return res, tuple(skipped_tokens)
else:
return res, None
@staticmethod
def _skip_token(skipped_tokens, last_skipped_token_i, i, l):
if last_skipped_token_i == i - 1:
# recombine the tokens
skipped_tokens[-1] += l[i]
else:
# just append
skipped_tokens.append(l[i])
last_skipped_token_i = i
return last_skipped_token_i
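# Illustrative behaviour: skipping the adjacent tokens 'Today', ' ', 'is'
# (indices 0, 1, 2) leaves skipped_tokens == ['Today is'], because each call
# after the first sees last_skipped_token_i == i - 1 and extends the previous
# entry instead of appending a new one.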
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
"""
Parse a string in one of the supported formats, using the
``parserinfo`` parameters.
:param timestr:
A string containing a date/time stamp.
:param parserinfo:
A :class:`parserinfo` object containing parameters for the parser.
If ``None``, the default arguments to the :class:`parserinfo`
constructor are used.
The ``**kwargs`` parameter takes the following keyword arguments:
:param default:
The default datetime object, if this is a datetime object and not
``None``, elements specified in ``timestr`` replace elements in the
default object.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a naive
:class:`datetime` object is returned.
:param tzinfos:
Additional time zone names / aliases which may be present in the
string. This argument maps time zone names (and optionally offsets
from those time zones) to time zones. This parameter can be a
dictionary with timezone aliases mapping time zone names to time
zones or a function taking two parameters (``tzname`` and
``tzoffset``) and returning a time zone.
The timezones to which the names are mapped can be an integer
offset from UTC in minutes or a :class:`tzinfo` object.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from dateutil.parser import parse
>>> from dateutil.tz import gettz
>>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21,
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
This parameter is ignored if ``ignoretz`` is set.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM and
YMD. If set to ``None``, this value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken to
be the year, otherwise the last number is taken to be the year. If
this is set to ``None``, the value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param fuzzy:
Whether to allow fuzzy parsing, allowing for string like "Today is
January 1, 2047 at 8:21:00AM".
:param fuzzy_with_tokens:
If ``True``, ``fuzzy`` is automatically set to True, and the parser
will return a tuple where the first element is the parsed
:class:`datetime.datetime` timestamp and the second element is
a tuple containing the portions of the string which were ignored:
.. doctest::
>>> from dateutil.parser import parse
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
:return:
Returns a :class:`datetime.datetime` object or, if the
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
first element being a :class:`datetime.datetime` object, the second
a tuple containing the fuzzy tokens.
:raises ValueError:
Raised for invalid or unknown string format, if the provided
:class:`tzinfo` is not in a valid format, or if an invalid date
would be created.
:raises OverflowError:
Raised if the parsed date exceeds the largest valid C integer on
your system.
"""
if parserinfo:
return parser(parserinfo).parse(timestr, **kwargs)
else:
return DEFAULTPARSER.parse(timestr, **kwargs)
class _tzparser(object):
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
def parse(self, tzstr):
res = self._result()
l = _timelex.split(tzstr)
try:
len_l = len(l)
i = 0
while i < len_l:
# BRST+3[BRDT[+2]]
j = i
while j < len_l and not [x for x in l[j]
if x in "0123456789:,-+"]:
j += 1
if j != i:
if not res.stdabbr:
offattr = "stdoffset"
res.stdabbr = "".join(l[i:j])
else:
offattr = "dstoffset"
res.dstabbr = "".join(l[i:j])
i = j
if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
"0123456789")):
if l[i] in ('+', '-'):
# Yes, that's right. See the TZ variable
# documentation.
signal = (1, -1)[l[i] == '+']
i += 1
else:
signal = -1
len_li = len(l[i])
if len_li == 4:
# -0300
setattr(res, offattr, (int(l[i][:2])*3600 +
int(l[i][2:])*60)*signal)
elif i+1 < len_l and l[i+1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i])*3600+int(l[i+2])*60)*signal)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2])*3600*signal)
else:
return None
i += 1
if res.dstabbr:
break
else:
break
if i < len_l:
for j in range(i, len_l):
if l[j] == ';':
l[j] = ','
assert l[i] == ','
i += 1
if i >= len_l:
pass
elif (8 <= l.count(',') <= 9 and
not [y for x in l[i:] if x != ','
for y in x if y not in "0123456789"]):
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
for x in (res.start, res.end):
x.month = int(l[i])
i += 2
if l[i] == '-':
value = int(l[i+1])*-1
i += 1
else:
value = int(l[i])
i += 2
if value:
x.week = value
x.weekday = (int(l[i])-1) % 7
else:
x.day = int(l[i])
i += 2
x.time = int(l[i])
i += 2
if i < len_l:
if l[i] in ('-', '+'):
signal = (-1, 1)[l[i] == "+"]
i += 1
else:
signal = 1
res.dstoffset = (res.stdoffset+int(l[i]))*signal
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
'.', '-', ':')
for y in x if y not in "0123456789"]):
for x in (res.start, res.end):
if l[i] == 'J':
# non-leap year day (1 based)
i += 1
x.jyday = int(l[i])
elif l[i] == 'M':
# month[-.]week[-.]weekday
i += 1
x.month = int(l[i])
i += 1
assert l[i] in ('-', '.')
i += 1
x.week = int(l[i])
if x.week == 5:
x.week = -1
i += 1
assert l[i] in ('-', '.')
i += 1
x.weekday = (int(l[i])-1) % 7
else:
# year day (zero based)
x.yday = int(l[i])+1
i += 1
if i < len_l and l[i] == '/':
i += 1
# start time
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
elif i+1 < len_l and l[i+1] == ':':
# -03:00
x.time = int(l[i])*3600+int(l[i+2])*60
i += 2
if i+1 < len_l and l[i+1] == ':':
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2])*3600)
else:
return None
i += 1
assert i == len_l or l[i] == ','
i += 1
assert i >= len_l
except (IndexError, ValueError, AssertionError):
return None
return res
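# Illustrative parse (hypothetical TZ string): _tzparser().parse('EST5EDT')
# is expected to yield stdabbr='EST', stdoffset=-18000 (a bare offset is
# negated, per the POSIX TZ convention noted above) and dstabbr='EDT',
# leaving dstoffset for the caller to default.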
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
return DEFAULTTZPARSER.parse(tzstr)
def _parsems(value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
# vim:ts=4:sw=4:et
|
py | b41278b853c57e4b796e97b4e78bb171d87b126d | import logging
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
def get_logger(name, level=logging.DEBUG):
logger = logging.getLogger(name)
logger.handlers.clear()
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def add_filehandler(logger, filepath):
fh = logging.FileHandler(filepath)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
|
py | b41279f89ac432d1e5d2c73d1d8b0d049a158786 | '''
Convenience wrappers to make using the conf system as easy and seamless as possible
'''
def integrate(
hub,
imports,
override=None,
cli=None,
roots=None,
loader='json',
logs=True):
'''
Load the conf sub and run the integrate sequence.
'''
hub.pop.sub.add('pop.mods.conf')
hub.conf.integrate.load(
imports,
override,
cli=cli,
roots=roots,
loader=loader,
logs=logs)
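# Hypothetical usage sketch (all names illustrative; the precise call pattern
# depends on how the hub wires this sub in):
#
#   integrate(hub, ['myproj'], cli='myproj', roots=True)
#
# would load the config definitions declared by the 'myproj' imports and
# render them through the default 'json' loader, with logging options added.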
|
py | b4127a08ecec7a2e0b628a953afa96dce171ac45 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._access_control_client_operations import AccessControlClientOperationsMixin
__all__ = [
'AccessControlClientOperationsMixin',
]
|
py | b4127b3e9574f0a838b6ab27813dcc74c468fa58 | import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import argparse
import numpy as np
p = argparse.ArgumentParser()
p.add_argument(
"--wild_sfs",
nargs="*",
required=True,
help="""paths to wild SFS files""",
)
p.add_argument(
"--out",
required=True,
help="""name of output file""",
)
args = p.parse_args()
species = [a.split('.')[-2] for a in args.wild_sfs]
f, ax = plt.subplots()
colors = sns.color_palette('colorblind', len(args.wild_sfs))
for si, species_name in enumerate(species):
out_a = None
mut2idx = None
af = None
ac = None
for sfs_i, sfs in enumerate(args.wild_sfs):
if sfs.split('.')[-2] != species_name: continue
df = pd.read_csv(sfs, sep='\t')
if mut2idx is None:
mut2idx = dict(zip(list(df)[1:], range(len(list(df)[1:]))))
ac = df.values[:, 0]
af = ac / np.max(ac)
mut_counts = df.values[:, 1:]
if out_a is None:
out_a = mut_counts
else:
out_a += mut_counts
mut_sums = np.sum(out_a, axis=1)
mut_fracs = out_a / mut_sums[:, None]
ax.plot(
af,
mut_fracs[:, mut2idx["C>A"]],
label=species_name,
color=colors[si],
lw=2,
)
ax.set_xlabel("Allele frequency")
ax.set_ylabel("C>A mutation fraction")
ax.legend()
f.tight_layout()
f.savefig(args.out) |
py | b4127bc3b71886e42f8fa4fcede22f0663b920e5 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with multiple forward pass + checkpoint. """
import contextlib
import pickle
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
import torch.optim as optim
from fairscale.nn import checkpoint_wrapper
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.nn.data_parallel import auto_wrap_bn
from fairscale.nn.wrap import enable_wrap, wrap
from fairscale.utils import torch_version
from fairscale.utils.testing import dist_init, skip_if_single_gpu, teardown, temp_files_ctx
class Model(nn.Module):
"""Model to test FSDP(checkpoint())."""
def __init__(self):
super().__init__()
self.block1 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=3), nn.BatchNorm2d(4), nn.ReLU(inplace=True))
self.block2 = nn.Sequential(
nn.Conv2d(4, 8, kernel_size=3),
nn.BatchNorm2d(8),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
)
self.head = nn.Linear(8, 10)
def forward(self, x):
if isinstance(x, torch.Tensor):
return self.head(self.block2(self.block1(x)))
elif isinstance(x, list):
ys = [self.head(self.block2(self.block1(e))) for e in x]
return torch.cat(ys, dim=0)
class Model2(nn.Module):
"""Model to test FSDP(checkpoint(), checkpoint())."""
def __init__(self):
super().__init__()
self.block1 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=3), nn.BatchNorm2d(4), nn.ReLU(inplace=True))
self.block2 = nn.Sequential(nn.Conv2d(4, 4, kernel_size=3), nn.BatchNorm2d(4), nn.ReLU(inplace=False))
self.block3 = nn.Sequential(nn.Conv2d(4, 8, kernel_size=3), nn.BatchNorm2d(8), nn.ReLU(inplace=True))
self.head = nn.Sequential(nn.AdaptiveAvgPool2d(output_size=(1, 1)), nn.Flatten(), nn.Linear(8, 10))
def forward(self, x):
if isinstance(x, torch.Tensor):
return self.head(self.block3(self.block2(self.block1(x))))
elif isinstance(x, list):
ys = [self.head(self.block3(self.block2(self.block1(e)))) for e in x]
return torch.cat(ys, dim=0)
def _create_model(
with_model2, with_sync_bn, with_fsdp, with_checkpoint, mixed_precision, flatten, wrap_bn, fp32_reduce_scatter
):
model = Model2() if with_model2 else Model()
fsdp_config = None
if with_sync_bn:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
fsdp_config = {
"mixed_precision": False,
"flatten_parameters": False,
"reshard_after_forward": False,
"bucket_cap_mb": 0,
"force_input_to_fp32": True, # SyncBN needs this.
}
if with_fsdp:
if wrap_bn:
model.block1 = auto_wrap_bn(model.block1, single_rank_pg=False, fsdp_config=fsdp_config)
model.block2 = auto_wrap_bn(model.block2, single_rank_pg=False, fsdp_config=fsdp_config)
if with_model2:
model.block3 = auto_wrap_bn(model.block3, single_rank_pg=False, fsdp_config=fsdp_config)
if with_checkpoint:
model.block2 = checkpoint_wrapper(model.block2, maintain_forward_counter=True)
if with_model2:
model.block3 = checkpoint_wrapper(model.block3, maintain_forward_counter=True)
with enable_wrap(
wrapper_cls=FSDP,
flatten_parameters=flatten,
mixed_precision=mixed_precision,
compute_dtype=torch.float32,
fp32_reduce_scatter=fp32_reduce_scatter,
):
model.block1 = wrap(model.block1)
model.block2 = wrap(model.block2)
if with_model2:
model.block3 = wrap(model.block3)
model.head = wrap(model.head)
else:
if with_checkpoint:
model.block2 = checkpoint_wrapper(model.block2, maintain_forward_counter=False)
if with_model2:
model.block3 = checkpoint_wrapper(model.block3, maintain_forward_counter=False)
return model
def _distributed_worker(
gpu_id,
world_size,
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
files,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
):
filename, filename_rpc = files[:2]
filename_loss = files[2:]
torch.cuda.set_device(gpu_id)
rank = gpu_id
result = dist_init(rank, world_size, filename, filename_rpc)
assert result, "Dist init failed"
# Set to False when debugging, since error messages are clearer with cudnn disabled.
torch.backends.cudnn.enabled = True
# these make things deterministic.
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Ensure we have multiple forward passes.
batch = [
torch.randn(size=(2, 3, 16, 16)).cuda(),
torch.randn(size=(2, 3, 9, 9)).cuda(),
torch.randn(size=(2, 3, 9, 9)).cuda(),
]
if mixed_precision and not with_fsdp:
batch = [x.half() for x in batch]
model = _create_model(
with_model2, with_sync_bn, with_fsdp, with_checkpoint, mixed_precision, flatten, wrap_bn, fp32_reduce_scatter
)
model = model.cuda()
if with_fsdp:
model = FSDP(
model,
flatten_parameters=flatten,
mixed_precision=mixed_precision,
compute_dtype=torch.float32,
fp32_reduce_scatter=fp32_reduce_scatter,
)
model.set_gradient_divide_factors(1.0, 2.0, True)
no_sync_context = contextlib.suppress()
else:
# With DDP, we need no_sync and manual gradient reduction below because
# it can't handle multiple forward passes + checkpointing otherwise.
model = DistributedDataParallel(model, device_ids=[gpu_id])
no_sync_context = model.no_sync()
mp_context = contextlib.suppress()
if mixed_precision:
mp_context = torch.cuda.amp.autocast(enabled=True)
if gpu_id == 0:
print(model)
target = torch.tensor([0, 1, 2, 3, 4, 5], dtype=torch.long).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
losses = {}
i = 0
with no_sync_context:
for iteration in range(3):
with mp_context:
out = model(batch)
loss = criterion(out, target)
print("Loss", iteration, ":", loss.item())
losses[f"iter_{i}"] = loss
i += 1
optimizer.zero_grad()
loss.backward()
# Manual grad reduction, no autocast.
if not with_fsdp:
for p in model.parameters():
dist.all_reduce(p.grad.data)
p.grad.data.div_(2.0)
# Stepping, no autocast
optimizer.step()
# Due to dist.all_reduce code block above with ddp.no_sync, we seem to hit a bug
# in DDP where tensor.cpu() and torch.save() calls both hang. FSDP is not affected.
# Therefore, we have to compare losses here instead of states.
with open(filename_loss[rank], "wb") as f:
pickle.dump(losses, f)
teardown()
_result_cache = {}
def _get_cached_results(
world_size,
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
):
""" Cache the training to save time. For DDP, flatten, wrap_bn etc. doesn't matter, so
the results can be cached.
"""
if not with_fsdp:
flatten = None
wrap_bn = None
fp32_reduce_scatter = None
key = (
world_size,
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
)
global _result_cache
if key not in _result_cache:
# Get 4 files: 2 for dist_init and 2 for each rank to save the losses.
with temp_files_ctx(num=2 + world_size) as temp_files:
mp.spawn(
_distributed_worker,
(
world_size,
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
temp_files,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
),
nprocs=world_size,
)
final_losses = {}
for rank in range(world_size):
with open(temp_files[2 + rank], "rb") as f:
for iter_key, loss in pickle.load(f).items():
final_losses[f"rank_{rank}_{iter_key}"] = loss
_result_cache[key] = final_losses
return _result_cache[key]
@skip_if_single_gpu
@pytest.mark.parametrize("precision", ["full", "mixed"])
@pytest.mark.parametrize("flatten", ["flatten", "no_flatten"])
@pytest.mark.parametrize("wrap_bn", ["auto_wrap_bn", "no_auto_wrap_bn"])
@pytest.mark.parametrize("model_type", ["model1", "model2"])
@pytest.mark.parametrize("bn_type", ["bn", "sync_bn"])
def test_multiple_forward_checkpoint(precision, flatten, wrap_bn, model_type, bn_type):
mixed_precision = precision == "mixed"
flatten = flatten == "flatten"
wrap_bn = wrap_bn == "auto_wrap_bn"
fp32_reduce_scatter = True if mixed_precision else None
with_model2 = model_type == "model2"
with_sync_bn = bn_type == "sync_bn"
if torch_version() >= (1, 7, 0) and torch_version() < (1, 8, 0) and with_sync_bn:
# SyncBN is buggy in 1.7, errors like:
# E File "/home/circleci/venv/lib/python3.8/site-packages/torch/nn/modules/_functions.py", line 13, in forward
# E dtype=running_mean.dtype,
# E AttributeError: 'NoneType' object has no attribute 'dtype'
pytest.skip("SyncBatchNorm in 1.7 is buggy")
if with_sync_bn and not wrap_bn:
pytest.skip("SyncBatchNorm requires auto_wrap_bn")
if torch_version() < (1, 8, 0) and flatten:
# 1.6 and 1.7 throws this error:
# RuntimeError: Trying to backward through the graph a second time, but the saved
# intermediate results have already been freed. Specify retain_graph=True when calling
# backward the first time.
pytest.skip("older pytorch throws error when flatten is used")
world_size = 2
expected_losses = None
# Ensure ddp == ddp+ckpt == fsdp == fsdp+ckpt.
for with_fsdp in [False, True]:
for with_checkpoint in [False, True]:
if not with_fsdp and with_checkpoint:
continue
final_losses = _get_cached_results(
world_size,
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
)
if expected_losses is None:
expected_losses = final_losses
else:
print(f"checking: fsdp {with_fsdp} ckpt {with_checkpoint} with ddp+no_ckpt")
def check(exp, res):
assert list(exp.keys()) == list(res.keys()), f"{list(exp.keys())} vs. {list(res.keys())}"
rtol = 1e-4
atol = 1e-5
if with_model2 and mixed_precision and torch_version() >= (1, 9, 0):
# On CI, with the longer model2, mixed precision and 1.9, even ddp vs. ddp+ckpt has
# larger errors.
rtol = 1e-3
atol = 1e-4
for key in exp.keys():
exp_loss = exp[key]
res_loss = res[key]
torch.testing.assert_allclose(exp_loss, res_loss, rtol=rtol, atol=atol)
check(expected_losses, final_losses)
|
py | b4127cba86aab2ab58fffdc142d844b0314162e9 | import csv
import json
from story.utils import *
def load_tree(filename):
with open(filename, "r") as fp:
tree = json.load(fp)
return tree
def remove_phrase(text):
phrases = ["Years pass...", "Years pass"]
for phrase in phrases:
text = text.replace(phrase, "")
return text
def make_stories(current_story, tree):
stories = []
action = first_to_second_person(tree["action"])
action_list = action.split(" ")
first_word = action_list[0]
if first_word[-1] == ".":
first_word = first_word[:-1]
dont_add_you = [
"the",
"another",
"next",
"in",
"monday",
"back",
"a",
"years",
"one",
"two",
"during",
"months",
"weeks",
"seven",
"three",
"...",
"twelve",
"four",
"five",
"six",
"blackness...",
"you",
"no",
"yes",
"up",
"down",
"onward",
]
if action[0] is '"':
last_quote = action.rfind('"')
action = "You say " + action[: last_quote + 1]
elif first_word.lower() not in dont_add_you:
action = "You " + action[0].lower() + action[1:]
action = remove_phrase(action)
result = remove_phrase(tree["result"])
current_story += "\n> " + action + "\n" + result
action_results = tree["action_results"]
if len(action_results) == 0 or action_results[0] is None:
return [current_story]
else:
stories += make_stories(current_story, action_results[0])
for i in range(1, len(action_results)):
if action_results[i] is not None:
stories += make_stories(tree["result"], action_results[i])
return stories
def get_stories(filename):
tree = load_tree(filename)
stories = []
for action_result in tree["action_results"]:
stories += make_stories(tree["first_story_block"], action_result)
return stories
output_file_path = "text_adventures.txt"
with open(output_file_path, "w") as output_file:
filenames = ["stories/story" + str(i) + ".json" for i in range(0, 93)]
# filenames = []
for filename in filenames:
tree = load_tree(filename)
print('"' + tree["tree_id"] + '",')
filenames += ["stories/crowdsourcedstory" + str(i) + ".json" for i in range(0, 12)]
stories = []
for filename in filenames:
filename_stories = get_stories(filename)
stories += filename_stories
print(len(stories))
raw_text = ""
start_token = "<|startoftext|>"
end_token = "<|endoftext|>"
for story in stories:
raw_text += start_token + story + end_token + "\n"
print(len(raw_text))
output_file.write(raw_text)
|
py | b4127cda58b05d4f6f5ccae0485b2c468a1b638b | import unittest
from tuples import get_coordinate, convert_coordinate, compare_records, create_record, clean_up
class TuplesTest(unittest.TestCase):
def test_get_coordinate(self):
input_data = [("Scrimshaw Whale's Tooth", '2A'),
('Brass Spyglass', '4B'),
('Robot Parrot', '1C'),
('Glass Starfish', '6D'),
('Vintage Pirate Hat', '7E'),
('Pirate Flag', '7F'),
('Crystal Crab', '6A'),
('Model Ship in Large Bottle', '8A'),
('Angry Monkey Figurine', '5B'),
('Carved Wooden Elephant', '8C'),
('Amethyst Octopus', '1F'),
('Antique Glass Fishnet Float', '3D'),
('Silver Seahorse', '4E')]
result_data = ['2A', '4B', '1C', '6D', '7E', '7F', '6A', '8A', '5B', '8C', '1F', '3D', '4E']
for item, result in zip(input_data, result_data):
with self.subTest("tuple/coordinate variants", item=item, result=result):
self.assertEqual(get_coordinate(item), result)
def test_convert_coordinate(self):
input_data = ['2A', '4B', '1C', '6D', '7E', '7F', '6A', '8A', '5B', '8C', '1F', '3D', '4E']
result_data = [('2', 'A'),
('4', 'B'),
('1', 'C'),
('6', 'D'),
('7', 'E'),
('7', 'F'),
('6', 'A'),
('8', 'A'),
('5', 'B'),
('8', 'C'),
('1', 'F'),
('3', 'D'),
('4', 'E')]
for item, result in zip(input_data, result_data):
with self.subTest("coordinate variants for conversion", item=item, result=result):
self.assertEqual(convert_coordinate(item), result)
def test_compare_records(self):
input_data = [
(("Scrimshaw Whale's Tooth", '2A'), ('Deserted Docks', ('2', 'A') ,'Blue')),
(('Brass Spyglass', '4B'), ('Abandoned Lighthouse', ('4', 'B') ,'Blue')),
(('Robot Parrot', '1C'), ('Seaside Cottages', ('1', 'C') ,'Blue')),
(('Glass Starfish', '6D'), ('Tangled Seaweed Patch', ('6', 'D'),'Orange')),
(('Vintage Pirate Hat', '7E'), ('Quiet Inlet (Island of Mystery)', ('7', 'E') ,'Orange')),
(('Amethyst Octopus', '1F'), ('Seaside Cottages', ('1', 'C') ,'Blue')),
(('Angry Monkey Figurine', '5B'), ('Aqua Lagoon (Island of Mystery)', ('1', 'F') ,'Yellow')),
(('Antique Glass Fishnet Float', '3D'), ('Deserted Docks', ('2', 'A') ,'Blue')),
(('Brass Spyglass', '4B'), ('Spiky Rocks', ('3', 'D') ,'Yellow')),
(('Carved Wooden Elephant', '8C'), ('Abandoned Lighthouse', ('4', 'B') ,'Blue'))
]
result_data = [True, True, True, True, True, False, False, False, False, False]
for item, result in zip(input_data, result_data):
with self.subTest("do the coordinates match once reformatted?", item=item, result=result):
self.assertEqual(compare_records(item[0], item[1]), result)
def test_create_record(self):
input_data=[
(('Angry Monkey Figurine', '5B'), ('Stormy Breakwater', ('5', 'B') ,'Purple')),
(('Carved Wooden Elephant', '8C'), ('Foggy Seacave', ('8', 'C'), 'Purple')),
(('Amethyst Octopus', '1F'), ('Aqua Lagoon (Island of Mystery)', ('1', 'F') ,'Yellow')),
(('Antique Glass Fishnet Float', '3D'), ('Spiky Rocks', ('3', 'D'),'Yellow')),
(('Silver Seahorse', '4E'), ('Hidden Spring (Island of Mystery)', ('4', 'E') ,'Yellow')),
(('Amethyst Octopus', '1F'), ('Seaside Cottages', ('1', 'C') ,'Blue')),
(('Angry Monkey Figurine', '5B'), ('Aqua Lagoon (Island of Mystery)', ('1', 'F') ,'Yellow')),
(('Antique Glass Fishnet Float', '3D'), ('Deserted Docks', ('2', 'A') ,'Blue')),
(('Brass Spyglass', '4B'), ('Spiky Rocks', ('3', 'D'),'Yellow')),
(('Carved Wooden Elephant', '8C'), ('Abandoned Lighthouse', ('4', 'B') ,'Blue'))
]
result_data = [
('Angry Monkey Figurine', '5B', 'Stormy Breakwater', ('5', 'B'), 'Purple'),
('Carved Wooden Elephant', '8C', 'Foggy Seacave', ('8', 'C'), 'Purple'),
('Amethyst Octopus', '1F', 'Aqua Lagoon (Island of Mystery)', ('1', 'F'), 'Yellow'),
('Antique Glass Fishnet Float', '3D', 'Spiky Rocks', ('3', 'D'), 'Yellow'),
('Silver Seahorse', '4E', 'Hidden Spring (Island of Mystery)', ('4', 'E'), 'Yellow'),
'not a match',
'not a match',
'not a match',
'not a match',
'not a match'
]
for item, result in zip(input_data, result_data):
with self.subTest("make record if coordinates match", item=item, result=result):
self.assertEqual(create_record(item[0], item[1]), result)
def test_clean_up(self):
input_data = (
("Scrimshaw Whale's Tooth", '2A', 'Deserted Docks', ('2', 'A'), 'Blue'),
('Brass Spyglass', '4B', 'Abandoned Lighthouse', ('4', 'B'), 'Blue'),
('Robot Parrot', '1C', 'Seaside Cottages', ('1', 'C'), 'Blue'),
('Glass Starfish', '6D', 'Tangled Seaweed Patch', ('6', 'D'), 'Orange'),
('Vintage Pirate Hat', '7E', 'Quiet Inlet (Island of Mystery)', ('7', 'E'), 'Orange'),
('Pirate Flag', '7F', 'Windswept Hilltop (Island of Mystery)', ('7', 'F'), 'Orange'),
('Crystal Crab', '6A', 'Old Schooner', ('6', 'A'), 'Purple'),
('Model Ship in Large Bottle', '8A', 'Harbor Managers Office', ('8', 'A'), 'Purple'),
('Angry Monkey Figurine', '5B', 'Stormy Breakwater', ('5', 'B'), 'Purple'),
('Carved Wooden Elephant', '8C', 'Foggy Seacave', ('8', 'C'), 'Purple'),
('Amethyst Octopus', '1F', 'Aqua Lagoon (Island of Mystery)', ('1', 'F'), 'Yellow'),
('Antique Glass Fishnet Float', '3D', 'Spiky Rocks', ('3', 'D'), 'Yellow'),
('Silver Seahorse', '4E', 'Hidden Spring (Island of Mystery)', ('4', 'E'), 'Yellow')
)
result_data = """(\"Scrimshaw Whale's Tooth\", 'Deserted Docks', ('2', 'A'), 'Blue')\n\
('Brass Spyglass', 'Abandoned Lighthouse', ('4', 'B'), 'Blue')\n\
('Robot Parrot', 'Seaside Cottages', ('1', 'C'), 'Blue')\n\
('Glass Starfish', 'Tangled Seaweed Patch', ('6', 'D'), 'Orange')\n\
('Vintage Pirate Hat', 'Quiet Inlet (Island of Mystery)', ('7', 'E'), 'Orange')\n\
('Pirate Flag', 'Windswept Hilltop (Island of Mystery)', ('7', 'F'), 'Orange')\n\
('Crystal Crab', 'Old Schooner', ('6', 'A'), 'Purple')\n\
('Model Ship in Large Bottle', 'Harbor Managers Office', ('8', 'A'), 'Purple')\n\
('Angry Monkey Figurine', 'Stormy Breakwater', ('5', 'B'), 'Purple')\n\
('Carved Wooden Elephant', 'Foggy Seacave', ('8', 'C'), 'Purple')\n\
('Amethyst Octopus', 'Aqua Lagoon (Island of Mystery)', ('1', 'F'), 'Yellow')\n\
('Antique Glass Fishnet Float', 'Spiky Rocks', ('3', 'D'), 'Yellow')\n\
('Silver Seahorse', 'Hidden Spring (Island of Mystery)', ('4', 'E'), 'Yellow')\n"""
self.assertEqual(clean_up(input_data), result_data)
|
py | b4127d2cad8e88ae0b6901cf3bdad07f372fb4bf | # https://www.codechef.com/viewsolution/35855472
for _ in range(int(input())):
c_score = 0
m_score = 0
for _ in range(int(input())):
chef, morty = input().split()
chef = sum(int(i) for i in chef)
morty = sum(int(i) for i in morty)
if chef > morty:
c_score += 1
elif morty > chef:
m_score += 1
else:
c_score += 1
m_score += 1
if c_score > m_score: print(f'0 {c_score}')
elif m_score > c_score: print(f'1 {m_score}')
else: print(f'2 {m_score}') |
py | b4127e38e280961389e2262df263224a4b50f3e9 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# vi:ts=4 sw=4 et
# This script has been tested and developed for Python 2.7.
# It should be simple to convert it to Python 3, and even better to
# convert it to Python 3.4 (which gains native data-URL support).
#
# This script does not require any external library.
#
# Want to extract all embedded images from all SVG files from your system?
# locate .svg | while read f ; do [ -f "$f" ] && grep -l 'xlink:href.*data:' "$f" ; done | while read f ; do ./extract_embedded_images_from_svg.py -p tmp/`basename "$f"`- "$f" ; done
from __future__ import division
from __future__ import print_function
import argparse
import base64
import errno
import io
import re
import os
import os.path
import sys
import xml.etree.ElementTree
import textwrap
import urllib
def parse_arguments():
parser = argparse.ArgumentParser(
description='Extracts all embedded images inside an SVG file.',
epilog=textwrap.dedent('''
SVGZ (compressed SVG) files are not supported directly, uncompress them first:
gunzip -c foobar.svgz | {0} -
For extracting images from PDF files, try 'pdfimages -j' from poppler package.
'''.format(os.path.basename(sys.argv[0]))),
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'-p', '--prefix',
action='store',
help='prefix when saving the extracted images (default: the SVG pathname)'
)
parser.add_argument(
'svgfile',
action='store',
type=argparse.FileType('rb'),
help='the SVG file containing embedded images'
)
args = parser.parse_args()
return args
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
# In Python 3.2, this piece of code can be replaced by a single line.
def mkdir_p(path):
# For Python 3.2 or later:
# os.makedirs(path, exist_ok=True)
# For Python 2:
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def decode_data_url(url):
# In Python 3.4, urllib.request already supports data URLs.
# http://hg.python.org/cpython/rev/a182367eac5a
# return urllib.request.urlopen(data_url).read()
#
# For earlier Python, we still need this function here.
# The code below is based on:
# http://hg.python.org/cpython/file/8fe3022af4b3/Lib/urllib/request.py
scheme, data = url.split(':', 1)
if scheme != 'data':
return None, None
mediatype, data = data.split(',', 1)
# Even base64 encoded data URLs might be quoted so unquote in any case:
# data = urllib.parse.unquote_to_bytes(data) # For Python 3
data = urllib.unquote(data) # For Python 2.7
if mediatype.endswith(';base64'):
# data = base64.decodebytes(data) # For Python 3
data = base64.decodestring(data) # For Python 2.7
mediatype = mediatype[:-7]
if not mediatype:
mediatype = "text/plain;charset=US-ASCII"
# Discarding the charset information.
mediatype = mediatype.partition(';')[0]
return data, mediatype
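# Illustrative results (derived from the branches above):
#
#   decode_data_url('data:image/png;base64,aGVsbG8=')  # -> ('hello', 'image/png')
#   decode_data_url('data:,hi')                        # -> ('hi', 'text/plain')
#   decode_data_url('http://example.com')              # -> (None, None)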
# Based on:
# https://github.com/django/django/blob/e2ae8b048e7198428f696375b8bdcd89e90002d1/django/utils/text.py#L213
def get_valid_filename(s):
s = s.strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
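# Example (mirroring the Django helper this is based on):
#
#   get_valid_filename("john's portrait in 2004.jpg")
#   # -> 'johns_portrait_in_2004.jpg'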
def stripNS(s):
'''Receives either a tag name or an attribute name, returns the name
without the namespace.
>>> stripNS('{http://www.w3.org/2000/svg}image')
'image'
>>> stripNS('id')
'id'
'''
return s.rpartition('}')[2]
def extract_from_svg(svgfile, prefix):
# Creating dirs for prefix.
parts = prefix.rpartition('/')
prefix_dir = parts[0] + parts[1]
prefix_file = parts[2]
if prefix_dir:
mkdir_p(prefix_dir)
image_count = 0
mime_to_extension = {
'image/gif': '.gif',
'image/jpeg': '.jpg',
'image/png': '.png',
'image/svg+xml': '.svg', # Inception!?
}
for (event, elem) in xml.etree.ElementTree.iterparse(svgfile, ['start']):
tag = stripNS(elem.tag)
if tag == 'image':
attrs = {stripNS(k): v for k, v in elem.attrib.items()}
if 'href' in attrs:
url = attrs['href']
if url.startswith('data:'):
data, mimetype = decode_data_url(url)
image_count += 1
sane_id = get_valid_filename(attrs.get('id', ''))
extension = mime_to_extension.get(mimetype, '')
out_filename = '{0}{1}{2}{3}'.format(
prefix,
image_count,
'-' + sane_id if sane_id else '',
extension)
with open(out_filename, 'wb') as out:
out.write(data)
def main():
options = parse_arguments()
if options.prefix is None:
options.prefix = options.svgfile.name if options.svgfile.name != '<stdin>' else 'stdin'
extract_from_svg(options.svgfile, options.prefix)
if __name__ == "__main__":
main()
|
py | b4127f506ec687ebe053687294c35a4a5568b52c | from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
# Super user details
# Email: [email protected]
# Name: admin
# Password: password
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create new user profile"""
if not email:
raise ValueError('User must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""cureate new super user"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
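# Illustrative shell usage (credentials mirror the dev note at the top of
# this file):
#
#   UserProfile.objects.create_superuser('[email protected]', 'admin', 'password')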
class UserProfile(AbstractBaseUser, PermissionsMixin):
""""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrive full name"""
return self.name
def get_short_name(self):
"""Retrive short name"""
return self.name
def __str__(self):
"""Return string representation of the user"""
return self.email
class ProfileFeedItem(models.Model):
"""Profile status update"""
user_profiles = models.ForeignKey(
settings.AUTH_USER_MODEL, # or UserProfile
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now=True)
def __str__(self):
"""return model as string"""
return self.status_text
|
py | b4128019bced1afe70e15ccebd1acc1eda5adf76 | #!/usr/bin/env python3
# Copyright 2020 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Runs all E2E TensorFlow tests and extracts their benchmarking artifacts.
Example usages:
# Run all test suites and collect their artifacts:
python3 ./scripts/get_e2e_artifacts.py
# Run the e2e_tests test suite and collect its artifacts:
python3 ./scripts/get_e2e_artifacts.py --test_suites=e2e_tests
"""
import fileinput
import os
import re
import subprocess
import tempfile
from typing import Dict, Set
import zipfile
import utils
from absl import app
from absl import flags
SUITE_NAME_TO_TARGET = {
'e2e_tests':
'//integrations/tensorflow/e2e:e2e_tests',
'mobile_bert_squad_tests':
'//integrations/tensorflow/e2e:mobile_bert_squad_tests',
'layers_tests':
'//integrations/tensorflow/e2e/keras/layers:layers_tests',
'layers_dynamic_batch_tests':
'//integrations/tensorflow/e2e/keras/layers:layers_dynamic_batch_tests',
'layers_training_tests':
'//integrations/tensorflow/e2e/keras/layers:layers_training_tests',
'keyword_spotting_tests':
'//integrations/tensorflow/e2e/keras:keyword_spotting_tests',
'keyword_spotting_internal_streaming_tests':
'//integrations/tensorflow/e2e/keras:keyword_spotting_internal_streaming_tests',
'imagenet_non_hermetic_tests':
'//integrations/tensorflow/e2e/keras/applications:imagenet_non_hermetic_tests',
'slim_vision_tests':
'//integrations/tensorflow/e2e/slim_vision_models:slim_vision_tests',
}
SUITES_HELP = [f'`{name}`' for name in SUITE_NAME_TO_TARGET]
SUITES_HELP = f'{", ".join(SUITES_HELP[:-1])} and {SUITES_HELP[-1]}'
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'dry_run', False,
'Run without extracting files. Useful for quickly checking for artifact '
'collisions.')
flags.DEFINE_string(
'artifacts_dir', os.path.join(tempfile.gettempdir(), 'iree', 'modules'),
'Directory to transfer the benchmarking artifacts to. Defaults to '
'/tmp/iree/modules/')
flags.DEFINE_bool('run_test_suites', True, 'Run any specified test suites.')
flags.DEFINE_list('test_suites', list(SUITE_NAME_TO_TARGET.keys()),
f'Any combination of {SUITES_HELP}.')
EXPECTED_COLLISIONS = [
'/tf_ref/', 'tf_input.mlir', 'iree_input.mlir', '/saved_model/'
]
def _target_to_testlogs_path(target: str) -> str:
"""Convert target into the path where Bazel stores the artifacts we want."""
return os.path.join('bazel-testlogs',
target.replace('//', '').replace(':', os.sep))
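# For example, '//integrations/tensorflow/e2e:e2e_tests' becomes
# 'bazel-testlogs/integrations/tensorflow/e2e/e2e_tests' (with '/' as os.sep).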
def _target_to_test_name(target: str, test_suite_path: str) -> str:
"""Get test_name from `suite_name_test_name__tf__backend_name`."""
return target.split('__')[0].replace(f'{test_suite_path}_', '')
def get_test_paths_and_names(test_suite_path: str):
"""Get the paths Bazel stores test outputs in and the matching test names."""
targets = utils.get_test_targets(test_suite_path)
test_paths = [_target_to_testlogs_path(target) for target in targets]
test_names = [
_target_to_test_name(target, test_suite_path) for target in targets
]
return test_paths, test_names
def check_collision(filename: str, test_name: str, written_paths: Set[str],
paths_to_tests: Dict[str, str]):
"""Check that we aren't overwriting files unless we expect to."""
# Note: We can't use a check that the files have identical contents because
# tf_input.mlir can have random numbers appended to its function names.
# See https://github.com/google/iree/issues/3375
  expected_collision = any(name in filename for name in EXPECTED_COLLISIONS)
if filename in written_paths and not expected_collision:
raise ValueError(f'Collision found on {filename} between {test_name}.py '
f'and {paths_to_tests[filename]}.py')
else:
written_paths.add(filename)
paths_to_tests[filename] = test_name
def update_path(archive_path: str):
"""Update the --module_file flag with the new location of the compiled.vmfb"""
backend_path = archive_path.split('traces')[0] # 'ModuleName/backend_name'.
compiled_path = os.path.join(FLAGS.artifacts_dir, backend_path,
'compiled.vmfb')
flagfile_path = os.path.join(FLAGS.artifacts_dir, archive_path)
for line in fileinput.input(files=[flagfile_path], inplace=True):
if line.strip().startswith('--module_file'):
print(f'--module_file={compiled_path}\n', end='')
else:
print(line, end='')
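# For example, given the (hypothetical) archive path
# 'MyModule/backend/traces/trace_name/flagfile', the '--module_file' line in
# that flagfile is rewritten to '<artifacts_dir>/MyModule/backend/compiled.vmfb'.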
def extract_artifacts(test_path: str, test_name: str, written_paths: Set[str],
paths_to_tests: Dict[str, str]):
"""Unzips all of the benchmarking artifacts for a given test and backend."""
outputs = os.path.join(test_path, 'test.outputs', 'outputs.zip')
if FLAGS.dry_run and not os.path.exists(outputs):
# The artifacts may or may not be present on disk during a dry run. If they
# are then we want to collision check them, but if they aren't that's fine.
return
archive = zipfile.ZipFile(outputs)
# Filter out directory names.
filenames = [name for name in archive.namelist() if name[-1] != os.sep]
for filename in filenames:
# Check for collisions.
check_collision(filename, test_name, written_paths, paths_to_tests)
# Extract and update flagfile path.
if not FLAGS.dry_run:
archive.extract(filename, FLAGS.artifacts_dir)
if filename.endswith('flagfile'):
update_path(filename)
def main(argv):
del argv # Unused.
print(
"The bazel integrations build and tests are deprecated. This script "
"may be reworked in the future. For the time being refer to "
"https://google.github.io/iree/building-from-source/python-bindings-and-importers/ "
"and https://github.com/google/iree/blob/main/docs/developers/developing_iree/e2e_benchmarking.md "
"for information on how to run TensorFlow benchmarks.")
exit(1)
# Convert test suite shorthands to full test suite targets.
test_suites = [SUITE_NAME_TO_TARGET[suite] for suite in FLAGS.test_suites]
if FLAGS.run_test_suites:
# Use bazel test to execute all of the test suites in parallel.
command = ['bazel', 'test', *test_suites, '--color=yes']
print(f'Running: `{" ".join(command)}`')
if not FLAGS.dry_run:
subprocess.run(command, check=True)
print()
written_paths = set()
paths_to_tests = dict()
for test_suite in test_suites:
# Extract all of the artifacts for this test suite.
test_paths, test_names = get_test_paths_and_names(test_suite)
for i, (test_path, test_name) in enumerate(zip(test_paths, test_names)):
      print(f'\rTransferring {test_suite} {i + 1}/{len(test_paths)}', end='')
extract_artifacts(test_path, test_name, written_paths, paths_to_tests)
print('\n')
if __name__ == '__main__':
app.run(main)
|
py | b41280b78d9988f0549b17b6e5a635631b601c17 | name = "bio_info"
|
py | b41280e7849fc62901690438278f0836168dc726 | import sys
import json
import re
import shutil
import importlib
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Dict, List, Tuple
from loguru import logger
import yaml
sys.path.append("app/core/validator")
from src.exceptions import DatatypeException, YamlException, LabelException
LOCAL = False
def get_img_list(file_paths: str, img_list: List[str]) -> List[str]:
img_file_types = get_img_file_types()
for types in img_file_types:
files = Path(file_paths).glob(f"**/{types}")
for f in files:
if f.is_file():
img_list.append(str(f))
return img_list
def get_label_list(file_paths: Path, label_list: List[str]) -> List[str]:
glob_list = ["**/*.txt", "**/*.json", "**/*.xml"]
for g in glob_list:
files = Path(file_paths).glob(g)
for f in files:
if f.is_file():
label_list.append(str(f))
return label_list
def get_file_lists(dir_path: str):
"""
    Return the image and label file lists as (List[str], List[str]).
"""
img_list = []
label_list = []
p = Path(dir_path)
label_list = get_label_list(p, label_list)
img_list = get_img_list(p, img_list)
return sorted(img_list), sorted(label_list)
def yaml_safe_load(yaml_path: str) -> Dict[str, any]:
with open(yaml_path, "r") as f:
data_dict = yaml.safe_load(f)
return data_dict
def delete_dirs(dir_path: str):
shutil.rmtree(dir_path)
def json_load(json_path: str) -> Dict[str, any]:
with open(json_path, "r") as json_file:
json_dict = json.load(json_file)
return json_dict
def get_label2id(label_list: List[str], num_classes: int) -> Dict[str, int]:
label2id = {}
class_num = 1
for l in label_list:
try:
xml_obj = xml_load(l)
except:
raise LabelException(f"{l} file is broken.")
for obj in xml_obj.findall("object"):
class_name = obj.findtext("name")
# read class_name
if class_name not in label2id.keys():
label2id.update({f"{class_name}": class_num})
class_num += 1
return label2id
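# Illustrative result for a two-class VOC dataset (the class names below are
# examples, not values from any real dataset):
#   get_label2id(label_list, 2) -> {'person': 1, 'car': 2}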
def get_bbox_from_xml_obj(
    obj, label2id: Dict[str, str], anno: str, errors: List[str]
) -> Tuple[int, int, int, int, List[str]]:
xml_file_name = Path(anno).parts[-1]
try:
label = obj.findtext("name")
if not (label in label2id):
errors.append(f"{label} is not in 'yaml file', but in {anno} file.")
except:
errors.append(f"Can not find <name> in {anno}.")
bndbox = obj.find("bndbox")
if not bndbox:
errors.append(f"Can not find <bndbox> in {anno}.")
return 0, 0, 0, 0, errors
    def try_convert_bbox2number(
        bndbox, coord_name: str, anno: str, errors: List[str]
    ) -> Tuple[int, List[str]]:
try:
ret = int(float(bndbox.findtext(coord_name)))
except:
errors.append(
f"{bndbox.findtext(coord_name)} is not a number in {anno} file."
)
ret = 0
return ret, errors
xmin, errors = try_convert_bbox2number(bndbox, "xmin", anno, errors)
xmax, errors = try_convert_bbox2number(bndbox, "xmax", anno, errors)
ymin, errors = try_convert_bbox2number(bndbox, "ymin", anno, errors)
ymax, errors = try_convert_bbox2number(bndbox, "ymax", anno, errors)
return xmin, ymin, xmax, ymax, errors
def get_image_info_xml(annotation_root, extract_num_from_imgid=True) -> Dict[str, any]:
path = annotation_root.findtext("path")
if path is None:
filename = annotation_root.findtext("filename")
else:
filename = str(Path(path).parts[-1])
img_name = str(Path(filename).parts[-1])
img_id = str(Path(img_name).stem)
if extract_num_from_imgid and isinstance(img_id, str):
img_id = int(re.findall(r"\d+", img_id)[0])
size = annotation_root.find("size")
width = int(size.findtext("width"))
height = int(size.findtext("height"))
image_info = {"file_name": filename, "height": height, "width": width, "id": img_id}
return image_info
def xml_load(xml_path: str):
tree = ET.parse(xml_path)
annotation_root = tree.getroot()
return annotation_root
# Not used function
# def validate_first_dirs(dir_path: str, errors: List[str]) -> List[str]:
# """
# Validate if dir_path has directory other than ['train', 'val', 'test'].
# """
# paths = Path(dir_path).glob("*")
# check_dir_paths = []
# ret_dir_paths = []
# for p in paths:
# if p.is_dir():
# check_dir_paths.append(str(p.name))
# ret_dir_paths.append(str(p))
# if not ("train" in check_dir_paths):
# errors.append("Dataset dosen't have 'train' dir.")
# return ret_dir_paths, errors
# correct_cases = [
# set(["train", "val", "test"]),
# set(["train", "val"]),
# set(["train", "test"]),
# ]
# if set(check_dir_paths) not in correct_cases:
# errors.append(
# "Dataset has wrong directory structure. Any other directory than ['train', 'val', 'test'] is not accepted in first depth."
# )
# return ret_dir_paths, errors
# Not used function
# def validate_second_dirs(dir_path: List[str], errors: List[str]) -> List[str]:
# ret_dir_paths = []
# for sub_dir in dir_path:
# paths = Path(sub_dir).glob("*")
# check_dir_paths = []
# for p in paths:
# if p.is_dir():
# check_dir_paths.append(str(p.name))
# ret_dir_paths.append(str(p))
# if not ("images" in check_dir_paths):
# errors.append(f"Dataset dosen't have 'images' dir under {sub_dir}.")
# if not ("labels" in check_dir_paths):
# errors.append(f"Dataset dosen't have 'labels' dir under {sub_dir}.")
# return errors
def validate_second_dir(dir_path: Path, errors: List[str]) -> List[str]:
"""
    Validate that dir_path contains 'images' and 'labels' directories.
"""
paths = Path(dir_path).glob("*")
check_dir_paths = []
for p in paths:
if p.is_dir():
check_dir_paths.append(str(p.name))
if not ("images" in check_dir_paths):
errors.append(f"Dataset dosen't have 'images' dir under {dir_path}.")
if not ("labels" in check_dir_paths):
errors.append(f"Dataset dosen't have 'labels' dir under {dir_path}.")
return errors
def replace_images2labels(path: str) -> str:
# for case of linux and mac user
path = path.replace("train/images/", "train/labels/", 1)
path = path.replace("val/images/", "val/labels/", 1)
path = path.replace("test/images/", "test/labels/", 1)
    # for case of windows user (raw strings keep the backslashes literal)
    path = path.replace(r"train\images", r"train\labels", 1)
    path = path.replace(r"val\images", r"val\labels", 1)
    path = path.replace(r"test\images", r"test\labels", 1)
return path
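# e.g. replace_images2labels('data/train/images/0001') -> 'data/train/labels/0001'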
def get_filename_wo_suffix(file_path: str):
file_path = file_path.split(".")
file_path = ".".join(file_path[:-1])
return file_path
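# e.g. get_filename_wo_suffix('data/images/img.0001.jpg') -> 'data/images/img.0001'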
def validate_image_files_exist(
img_list: List[str], label_list: List[str], suffix: str, errors: List[str]
):
img_name, label_name = [], []
for i in img_list:
path_wo_suffix = get_filename_wo_suffix(i)
path_wo_suffix = replace_images2labels(path_wo_suffix)
img_name += [path_wo_suffix]
for l in label_list:
label_name = get_filename_wo_suffix(l)
        if label_name not in img_name:
errors.append(f"There is no image file for annotation file '{l}'")
return errors
def validate_data_yaml(yaml_path: str, errors: List[str]):
yaml_path = Path(yaml_path)
if not yaml_path.is_file():
raise YamlException(f"There is not {str(yaml_path)}")
try:
data_dict = yaml_safe_load(str(yaml_path))
except:
raise YamlException(f"{str(yaml_path)} file is broken.")
if not data_dict.get("names"):
raise YamlException(f"There is no 'names' in {str(yaml_path)}.")
if not data_dict.get("nc"):
raise YamlException(f"There is no 'nc' in {str(yaml_path)}.")
if len(data_dict["names"]) != data_dict["nc"]:
errors.append(
f"Length of 'names' and value of 'nc' in {str(yaml_path)} must be same."
)
num_classes = max([len(data_dict["names"]), data_dict["nc"]])
return data_dict["names"], errors, num_classes
def validate_dataset_type(root_path: str, user_data_type: str):
"""
data_type in ["coco", "yolo", "voc"]
"""
data_type = None
paths = Path(root_path).glob("**/*")
for p in paths:
suffix = str(p.suffix)
if suffix == ".xml":
data_type = "voc"
if suffix == ".txt":
data_type = "yolo"
break
if suffix == ".json":
data_type = "coco"
break
if not data_type:
raise DatatypeException(f"There are not any annotation files in {root_path}.")
elif user_data_type != data_type:
raise DatatypeException(
f"Check correct data type, your dataset type looks like '{data_type}'."
)
def get_dir_list(path: Path) -> List[str]:
"""
Return directory list
"""
dir_list = []
paths = Path(path).glob("**/*")
for p in paths:
if p.is_dir():
dir_list.append(str(p))
return dir_list
def does_it_have(paths: str, file_type_list: List[str]) -> bool:
    for types in file_type_list:
        files = Path(paths).glob(f"{types}")
        if any(True for _ in files):
            return True
    return False
def get_target_dirs(dir_paths: List[str], file_types: List[str]) -> List[str]:
"""
    Return the directories that contain files of any of the types in file_types.
"""
ret_dir_paths = []
for p in dir_paths:
answer = does_it_have(p, file_types)
if answer:
ret_dir_paths.append(p)
continue
return ret_dir_paths
def get_img_file_types() -> List[str]:
img_file_types = [
"*.jpeg",
"*.JPEG",
"*.jpg",
"*.JPG",
"*.png",
"*.PNG",
"*.BMP",
"*.bmp",
"*.TIF",
"*.tif",
"*.TIFF",
"*.tiff",
"*.DNG",
"*.dng",
"*.WEBP",
"*.webp",
"*.mpo",
"*.MPO",
]
return img_file_types
def write_error_txt(errors: List[str]):
f = open("validation_result.txt", "w")
for e in errors:
f.write(e + "\n")
f.close()
def log_n_print(message:str):
if not LOCAL:
logger.debug(message)
else:
print(message)
def validate_detection_task(
root_path: str,
data_format: str,
yaml_path: str,
fix=False,
):
errors = []
dir_path = Path(root_path)
errors = validate_second_dir(dir_path, errors)
log_n_print("[Validate: 1/5]: Done validation dir structure ['images', 'labels'].")
validate_dataset_type(root_path, data_format)
log_n_print("[Validate: 2/5]: Done validation, user select correct data type.")
img_list, label_list = get_file_lists(dir_path)
yaml_label, errors, num_classes = validate_data_yaml(yaml_path, errors)
log_n_print(f"[Validate: 3/5]: Done validation for {yaml_path} file.")
_validate = getattr(
importlib.import_module(f"src.{data_format.lower()}"),
"validate",
)
errors = _validate(
dir_path, num_classes, label_list, img_list, yaml_label, errors, fix
)
return errors
def validate_classification_task(
root_path: str,
data_format: str,
yaml_path: str,
):
errors = []
dir_path = Path(root_path)
#dir_paths, errors = validate_first_dirs(dir_path, errors)
#log_n_print("[Validate: 1/4]: Done validation dir structure ['train', 'val', 'test'].")
yaml_label, errors, _ = validate_data_yaml(yaml_path, errors)
log_n_print(f"[Validate: 1/3]: Done validation for {yaml_path} file.")
_validate = getattr(
importlib.import_module(f"src.{data_format.lower()}"),
"validate",
)
errors = _validate(
yaml_label, dir_path, errors
)
return errors
def validate(
root_path: str,
data_format: str,
yaml_path: str,
task: str="detection",
delete=False,
fix=False,
local=False # True for local run, False for BE run.
):
    global LOCAL
    LOCAL = local
log_n_print("Start dataset validation.")
log_n_print("=========================")
log_n_print(f"data path: {root_path}")
log_n_print(f"data format: {data_format}")
log_n_print(f"yaml path: {yaml_path}")
log_n_print(f"autofix: {fix}")
log_n_print("=========================")
if task == "detection":
        errors = validate_detection_task(root_path, data_format, yaml_path, fix)
elif task == "classification":
errors = validate_classification_task(root_path, data_format, yaml_path)
if len(errors) == 0:
log_n_print("Validation completed! Now try your dataset on NetsPresso!")
else:
write_error_txt(errors)
if local:
log_n_print("Validation error, please check 'validation_result.txt'.")
else:
log_n_print(
"Validation error, please visit 'https://github.com/Nota-NetsPresso/NetsPresso-ModelSearch-Dataset-Validator' and validate dataset."
)
if delete:
delete_dirs(Path(root_path))
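# Example invocation (a sketch; the paths below are hypothetical):
#   validate(root_path='my_dataset', data_format='yolo',
#            yaml_path='my_dataset/data.yaml', task='detection', local=True)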
|
py | b41281786c8a5f16be3f841718ba9344f8aa072a | import PySimpleGUI as sg
class Telapython:
def __init__(self):
#sg.theme_previewer()
sg.change_look_and_feel('darkbrown2')
#layout
layout = [
[sg.Text('Nome',size=(5,0)), sg.Input(size=(10,0),key='Nome')],
[sg.Text('Idade',size=(5,0)), sg.Input(size=(10,0),key='Idade')],
[sg.Text('Quais provedores de e-mail são aceitos ?')],
            [sg.Checkbox('Gmail',key='Gmail'), sg.Checkbox('Outlook',key='Outlook'), sg.Checkbox('Yahoo',key='Yahoo')],
[sg.Text('Você aceita cartão ?')],
[sg.Radio('Aceito','cartão',key='cartao_sim'),sg.Radio('Não aceito','cartão',key='cartao_nao')],
[sg.Button('Enviar Dados',size=(20,0))],
[sg.Text('Velocidade de Script')],
[sg.Slider(range=(0,255),default_value=0,orientation='h',size=(30,20),key='velocidade_slider')],
[sg.Output(size=(30,20))]
]
        # window
        self.janela = sg.Window('Dados do Usuário', layout)
def iniciar(self):
while True:
            # extract the data from the window
            self.Button_dados, self.values = self.janela.Read()
nome = self.values['Nome']
idade = self.values['Idade']
aceita_gmail = self.values['Gmail']
aceita_outlook = self.values['Outlook']
aceita_yahoo = self.values['Yahoo']
cartao_sim = self.values['cartao_sim']
cartao_nao = self.values['cartao_nao']
velocidade_script = self.values['velocidade_slider']
print(f'Dados do Usuário')
print('-'*16)
print(f'nome : {nome}')
print(f'idade : {idade}')
print(f'Gmail : {aceita_gmail}')
print(f'Outlook : {aceita_outlook}')
print(f'Yahoo : {aceita_yahoo}')
if cartao_sim:
print(f'cartão_sim : {cartao_sim}')
else:
print(f'cartão_não : {cartao_nao}')
print(f'velocidade script : {velocidade_script}')
print('-'*16)
tela = Telapython()
tela.iniciar()
|
py | b4128195bb7c2f3dcb7298f7141b3f25ac2a4b7f | import csv
import sys
import json
import sqlite3
import hashlib
from datetime import date, datetime
def read(filename):
result = []
with open(filename) as csv_in:
reader = csv.DictReader(csv_in)
for row in reader:
result.append(row)
return result
def get_sinasc():
sinasc = read("e_DNPR2016.csv")
print("nascimento:", len(sinasc))
return sinasc
def get_sim():
sim = read("e_DOPR2016.csv")
print("mortalidade:", len(sim))
return sim
def get_attend(i):
attend_filenames = [
"e_RDPR1601.csv",
"e_RDPR1602.csv",
"e_RDPR1603.csv",
"e_RDPR1604.csv",
"e_RDPR1605.csv",
"e_RDPR1606.csv",
"e_RDPR1607.csv",
"e_RDPR1608.csv",
"e_RDPR1609.csv",
"e_RDPR1610.csv",
"e_RDPR1611.csv",
"e_RDPR1612.csv"
]
filename = attend_filenames[i]
    attend = read(filename)
    print("total admissions for month", i + 1, len(attend))
    return attend
def to_hash(*args):
hash = hashlib.md5()
for arg in args:
hash.update(arg.encode())
return hash.hexdigest()
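# e.g. to_hash('2016-05-01', 'F') returns the hex MD5 digest of the
# concatenated strings; it is used below as a deterministic record-linkage key.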
def get_date_1(value):
if len(value) == 8:
day = int(value[0:2])
month = int(value[2:4])
year = int(value[4:8])
return date(year, month, day)
return None
def get_date_2(nasc):
if len(nasc) == 8:
year = int(nasc[0:4])
month = int(nasc[4:6])
day = int(nasc[6:8])
return date(year, month, day)
return None
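# get_date_1 parses 'DDMMYYYY', get_date_2 parses 'YYYYMMDD':
#   get_date_1('25122016') -> date(2016, 12, 25)
#   get_date_2('20161225') -> date(2016, 12, 25)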
def main():
sim_sql = """
CREATE TABLE sim (
ID integer PRIMARY KEY,
ID_INTEGRACAO text,
SEXO text,
RACACOR text,
DTNASC date,
CODMUNRES text,
DTOBITO date,
LOCOCOR text,
CAUSABAS text
);"""
conn = sqlite3.connect("sim_sihsus.sqlite")
c = conn.cursor()
c.execute(sim_sql)
for row in get_sim():
if len(row["DTNASC"]) > 0 and row["SEXO"] != "Ignorado" and len(row["RACACOR"]) > 0 and (row["LOCOCOR"] == "Hospital" or row["LOCOCOR"] == "Outro estab saude"):
DTNASC = get_date_1(row['DTNASC'])
SEXO = row["SEXO"]
RACACOR = row["RACACOR"]
CODMUNRES = row['CODMUNRES']
DTOBITO = get_date_1(row['DTOBITO'])
LOCOCOR = row['LOCOCOR']
CAUSABAS = row['CAUSABAS']
ID_INTEGRACAO = to_hash(DTNASC.isoformat(), SEXO, RACACOR, CODMUNRES, DTOBITO.isoformat(), "Com obito")
c.execute('''INSERT INTO sim (ID_INTEGRACAO, SEXO, RACACOR, DTNASC, CODMUNRES, DTOBITO, LOCOCOR, CAUSABAS) VALUES (?,?,?,?,?,?,?,?)''', (ID_INTEGRACAO,SEXO,RACACOR,DTNASC,CODMUNRES,DTOBITO,LOCOCOR,CAUSABAS))
sihsus_sql = """
CREATE TABLE sihsus (
ID integer PRIMARY KEY,
ID_INTEGRACAO text,
ID_ATENDIMENTO text,
SEXO text,
RACA_COR text,
NASC date,
MUNIC_RES text,
CEP text,
DT_INTER date,
DT_SAIDA date,
DIAG_PRINC text,
MORTE text
);"""
c.execute(sihsus_sql)
for i in range(0, 12):
attend = get_attend(i)
for row in attend:
if row["RACA_COR"] != "Ignorado":
NASC = get_date_1(row['NASC'])
SEXO = row["SEXO"]
RACA_COR = row["RACA_COR"]
MUNIC_RES = row['MUNIC_RES']
CEP = row['CEP']
DT_INTER = get_date_2(row['DT_INTER'])
DT_SAIDA = get_date_2(row['DT_SAIDA'])
DIAG_PRINC = row['DIAG_PRINC']
MORTE = row["MORTE"]
ID_INTEGRACAO = to_hash(NASC.isoformat(), SEXO, RACA_COR, MUNIC_RES, DT_SAIDA.isoformat(), MORTE)
ID_ATENDIMENTO = to_hash(NASC.isoformat(), SEXO, RACA_COR, MUNIC_RES, CEP)
c.execute('''INSERT INTO sihsus (ID_INTEGRACAO, ID_ATENDIMENTO, SEXO, RACA_COR, NASC, MUNIC_RES, CEP, DT_INTER, DT_SAIDA, DIAG_PRINC, MORTE) VALUES (?,?,?,?,?,?,?,?,?,?,?)''', (ID_INTEGRACAO,ID_ATENDIMENTO,SEXO,RACA_COR,NASC,MUNIC_RES,CEP,DT_INTER,DT_SAIDA,DIAG_PRINC,MORTE))
conn.commit()
conn.close()
if __name__ == "__main__":
main() |
py | b412821b998b2410f0b823e4474b8569ce04d45f | import os
import sys
import inspect
def add_path_to_parent_folder():
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir) |
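# Typical use from a script in a subdirectory (the module name below is
# hypothetical):
#   from add_path import add_path_to_parent_folder
#   add_path_to_parent_folder()
#   import module_from_parent_folder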
py | b41283890b95d77f5a7781e69982f339f9d030c3 | from .QuantConv import *
from .QuantConvBN import *
from .QuantRelu import *
|
py | b412838a78b4bfe9a14bfd47bd2cf5aabb1b250b | import torch
import torch.nn.functional as F
class NormalizeRotation(object):
r"""Rotates all points so that the eigenvectors overlie the axes of the
Cartesian coordinate system.
If the data additionally holds normals saved in :obj:`data.norm` these will
be also rotated.
Args:
max_points (int, optional): If set to a value greater than :obj:`0`,
only a random number of :obj:`max_points` points are sampled and
used to compute eigenvectors. (default: :obj:`-1`)
"""
def __init__(self, max_points=-1):
self.max_points = max_points
def __call__(self, data):
pos = data.pos
if self.max_points > 0 and pos.size(0) > self.max_points:
perm = torch.randperm(pos.size(0))
pos = pos[perm[:self.max_points]]
pos = pos - pos.mean(dim=0, keepdim=True)
C = torch.matmul(pos.t(), pos)
e, v = torch.eig(C, eigenvectors=True) # v[:,j] is j-th eigenvector
data.pos = torch.matmul(data.pos, v)
if 'norm' in data:
data.norm = F.normalize(torch.matmul(data.norm, v))
return data
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
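# Minimal usage sketch (assumes torch_geometric is installed):
#   from torch_geometric.data import Data
#   data = Data(pos=torch.randn(100, 3))
#   data = NormalizeRotation(max_points=50)(data)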
|
py | b41283ba6812b46e81dc97145a43ec5705ef13da | from dbindexer import autodiscover
autodiscover()
|
py | b412842be3377d982e7aa5cad692903c734fa9ef | from model import *
from data import *
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
data_gen_args = dict(rotation_range=0.2,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
horizontal_flip=True,
fill_mode='nearest')
myGene = trainGenerator(2,'data/membrane/train','image','label',data_gen_args,save_to_dir = None)
model = unet()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True)
model.fit_generator(myGene,steps_per_epoch=300,epochs=1,callbacks=[model_checkpoint])
testGene = testGenerator("data/membrane/test")
results = model.predict_generator(testGene,30,verbose=1)
saveResult("data/membrane/test",results)
|
py | b4128433b5ebeb22f6d17ed37dfb616bc69a215b | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ArgumentsRequired
class coss(Exchange):
def describe(self):
return self.deep_extend(super(coss, self).describe(), {
'id': 'coss',
'name': 'COSS',
'countries': ['SG', 'NL'],
'rateLimit': 1000,
'version': 'v1',
'certified': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/50328158-22e53c00-0503-11e9-825c-c5cfd79bfa74.jpg',
'api': {
'trade': 'https://trade.coss.io/c/api/v1',
'engine': 'https://engine.coss.io/api/v1',
'public': 'https://trade.coss.io/c/api/v1',
'web': 'https://trade.coss.io/c', # undocumented
'exchange': 'https://exchange.coss.io/api',
},
'www': 'https://www.coss.io',
'doc': 'https://api.coss.io/v1/spec',
'referral': 'https://www.coss.io/c/reg?r=OWCMHQVW2Q',
},
'has': {
'fetchTrades': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchMarkets': True,
'fetchCurrencies': True,
'fetchBalance': True,
'fetchOrderBook': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchClosedOrders': True,
'fetchOpenOrders': True,
'fetchOHLCV': True,
'createOrder': True,
'cancelOrder': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1d',
'1w': '1w',
},
'api': {
'exchange': {
'get': [
'getmarketsummaries',
],
},
'public': {
'get': [
'market-price',
'exchange-info',
],
},
'web': {
'get': [
'coins/getinfo/all', # undocumented
'order/symbols', # undocumented
'coins/get_base_list', # undocumented
],
},
'engine': {
'get': [
'dp',
'ht',
'cs',
],
},
'trade': {
'get': [
'ping',
'time',
'account/balances',
'account/details',
],
'post': [
'order/add',
'order/details',
'order/list/open',
'order/list/completed',
'order/list/all',
'order/trade-detail',
],
'delete': [
'order/cancel',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.0025,
'maker': 0.0,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
'commonCurrencies': {
'COS': 'COSS',
},
})
async def fetch_markets(self, params={}):
response = await self.publicGetExchangeInfo(params)
#
# { timezone: "UTC",
# server_time: 1545171487108,
# rate_limits: [{ type: "REQUESTS",
# interval: "MINUTE",
# limit: 1000 }],
# base_currencies: [{currency_code: "BTC", minimum_total_order: "0.0001"},
# {currency_code: "USDT", minimum_total_order: "1"},
# {currency_code: "EUR", minimum_total_order: "1"}],
# coins: [{ currency_code: "ADI",
# name: "Aditus",
# minimum_order_amount: "0.00000001"},
# ...
# { currency_code: "NPXSXEM",
# name: "PundiX-XEM",
# minimum_order_amount: "0.00000001" } ],
# symbols: [{ symbol: "ADI_BTC",
# amount_limit_decimal: 0,
# price_limit_decimal: 8,
# allow_trading: True },
# ...
# { symbol: "ETH_GUSD",
# amount_limit_decimal: 5,
# price_limit_decimal: 3,
# allow_trading: True } ] }
#
result = []
markets = self.safe_value(response, 'symbols', [])
baseCurrencies = self.safe_value(response, 'base_currencies', [])
baseCurrenciesByIds = self.index_by(baseCurrencies, 'currency_code')
currencies = self.safe_value(response, 'coins', [])
currenciesByIds = self.index_by(currencies, 'currency_code')
for i in range(0, len(markets)):
market = markets[i]
marketId = market['symbol']
baseId, quoteId = marketId.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'amount_limit_decimal'),
'price': self.safe_integer(market, 'price_limit_decimal'),
}
active = self.safe_value(market, 'allow_trading', False)
baseCurrency = self.safe_value(baseCurrenciesByIds, baseId, {})
minCost = self.safe_float(baseCurrency, 'minimum_total_order')
currency = self.safe_value(currenciesByIds, baseId, {})
defaultMinAmount = math.pow(10, -precision['amount'])
minAmount = self.safe_float(currency, 'minimum_order_amount', defaultMinAmount)
result.append({
'symbol': symbol,
'id': marketId,
'baseId': baseId,
'quoteId': quoteId,
'base': base,
'quote': quote,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies(self, params={}):
response = await self.webGetCoinsGetinfoAll(params)
#
# [{ currency_code: "VET",
# name: "VeChain",
# buy_limit: 0,
# sell_limit: 0,
# usdt: 0,
# transaction_time_limit: 5,
# status: "trade",
# withdrawn_fee: "0.6",
# minimum_withdrawn_amount: "1.2",
# minimum_deposit_amount: "0.6",
# minimum_order_amount: "0.00000001",
# decimal_format: "0.########",
# token_type: null, # "erc", "eos", "stellar", "tron", "ripple"...
# buy_at: 0,
# sell_at: 0,
# min_rate: 0,
# max_rate: 0,
# allow_withdrawn: False,
# allow_deposit: False,
# explorer_website_mainnet_link: null,
# explorer_website_testnet_link: null,
# deposit_block_confirmation: "6",
# withdraw_block_confirmation: "0",
# icon_url: "https://s2.coinmarketcap.com/static/img/coins/32x32/3077.png",
# is_fiat: False,
# allow_sell: True,
# allow_buy: True }]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
currencyId = self.safe_string(currency, 'currency_code')
code = self.safe_currency_code(currencyId)
name = self.safe_string(currency, 'name')
allowBuy = self.safe_value(currency, 'allow_buy')
allowSell = self.safe_value(currency, 'allow_sell')
allowWithdrawals = self.safe_value(currency, 'allow_withdrawn')
allowDeposits = self.safe_value(currency, 'allow_deposit')
active = allowBuy and allowSell and allowWithdrawals and allowDeposits
fee = self.safe_float(currency, 'withdrawn_fee')
type = self.safe_string(currency, 'token_type')
#
# decimal_format can be anything...
#
# 0.########
# #.########
# 0.##
# ''(empty string)
# 0.000000
# null(None)
# 0.0000
# 0.###
#
decimalFormat = self.safe_string(currency, 'decimal_format')
precision = 8
if decimalFormat is not None:
parts = decimalFormat.split('.')
numParts = len(parts) # transpiler workaround for array lengths
if numParts > 1:
if len(parts[1]) > 1:
precision = len(parts[1])
result[code] = {
'id': currencyId,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': fee,
'precision': precision,
'type': type,
'limits': {
'amount': {
'min': self.safe_float(currency, 'minimum_order_amount'),
'max': None,
},
'withdraw': {
'min': self.safe_float(currency, 'minimum_withdrawn_amount'),
'max': None,
},
},
}
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.tradeGetAccountBalances(params)
#
# [{currency_code: "ETH",
# address: "0x6820511d43111a941d3e187b9e36ec64af763bde", # deposit address
# total: "0.20399125",
# available: "0.20399125",
# in_order: "0",
# memo: null }, # tag, if any
# {currency_code: "ICX",
# address: "",
# total: "0",
# available: "0",
# in_order: "0",
# memo: null } ]
#
result = {}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency_code')
code = self.safe_currency_code(currencyId)
total = self.safe_float(balance, 'total')
used = self.safe_float(balance, 'in_order')
free = self.safe_float(balance, 'available')
result[code] = {
'total': total,
'used': used,
'free': free,
}
return self.parse_balance(result)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
int(ohlcv[0]), # timestamp
float(ohlcv[1]), # Open
float(ohlcv[2]), # High
float(ohlcv[3]), # Low
float(ohlcv[4]), # Close
float(ohlcv[5]), # base Volume
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'tt': self.timeframes[timeframe],
}
response = await self.engineGetCs(self.extend(request, params))
#
# { tt: "1m",
# symbol: "ETH_BTC",
# nextTime: 1545138960000,
# series: [[ 1545138960000,
# "0.02705000",
# "0.02705000",
# "0.02705000",
# "0.02705000",
# "0.00000000" ],
# ...
# [ 1545168900000,
# "0.02684000",
# "0.02684000",
# "0.02684000",
# "0.02684000",
# "0.00000000" ] ],
# limit: 500 }
#
return self.parse_ohlcvs(response['series'], market, timeframe, since, limit)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
marketId = self.market_id(symbol)
request = {'symbol': marketId}
# limit argument is not supported on COSS's end
response = await self.engineGetDp(self.extend(request, params))
#
# {symbol: "COSS_ETH",
# asks: [["0.00065200", "214.15000000"],
# ["0.00065300", "645.45000000"],
# ...
# ["0.00076400", "380.00000000"],
# ["0.00076900", "25.00000000"] ],
# limit: 100,
# bids: [["0.00065100", "666.99000000"],
# ["0.00065000", "1171.93000000"],
# ...
# ["0.00037700", "3300.00000000"],
# ["0.00037600", "2010.82000000"] ],
# time: 1545180569354 }
#
timestamp = self.safe_integer(response, 'time')
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
#
# {MarketName: "COSS-ETH",
# High: 0.00066,
# Low: 0.000628,
# BaseVolume: 131.09652674,
# Last: 0.000636,
# TimeStamp: "2018-12-19T05:16:41.369Z",
# Volume: 206126.6143710692,
# Ask: "0.00063600",
# Bid: "0.00063400",
# PrevDay: 0.000636 }
#
timestamp = self.parse8601(self.safe_string(ticker, 'TimeStamp'))
symbol = None
marketId = self.safe_string(ticker, 'MarketName')
if marketId is not None:
marketId = marketId.replace('-', '_')
market = self.safe_value(self.markets_by_id, marketId, market)
if market is None:
if marketId is not None:
baseId, quoteId = marketId.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
previous = self.safe_float(ticker, 'PrevDay')
last = self.safe_float(ticker, 'Last')
change = None
percentage = None
if last is not None:
if previous is not None:
change = last - previous
if previous > 0:
percentage = (change / previous) * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'High'),
'low': self.safe_float(ticker, 'Low'),
'bid': self.safe_float(ticker, 'Bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'Ask'),
'askVolume': None,
'vwap': None,
'open': previous,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_float(ticker, 'Volume'),
'quoteVolume': self.safe_float(ticker, 'BaseVolume'),
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.exchangeGetGetmarketsummaries(params)
#
# {success: True,
# message: "",
# result: [{MarketName: "COSS-ETH",
# High: 0.00066,
# Low: 0.000628,
# BaseVolume: 131.09652674,
# Last: 0.000636,
# TimeStamp: "2018-12-19T05:16:41.369Z",
# Volume: 206126.6143710692,
# Ask: "0.00063600",
# Bid: "0.00063400",
# PrevDay: 0.000636 },
# ...
# {MarketName: "XLM-BTC",
# High: 0.0000309,
# Low: 0.0000309,
# BaseVolume: 0,
# Last: 0.0000309,
# TimeStamp: "2018-12-19T02:00:02.145Z",
# Volume: 0,
# Ask: "0.00003300",
# Bid: "0.00003090",
# PrevDay: 0.0000309 } ],
# volumes: [{CoinName: "ETH", Volume: 668.1928095999999}, # these are overall exchange volumes
# {CoinName: "USD", Volume: 9942.58480324},
# {CoinName: "BTC", Volume: 43.749184570000004},
# {CoinName: "COSS", Volume: 909909.26644574},
# {CoinName: "EUR", Volume: 0},
# {CoinName: "TUSD", Volume: 2613.3395026999997},
# {CoinName: "USDT", Volume: 1017152.07416519},
# {CoinName: "GUSD", Volume: 1.80438},
# {CoinName: "XRP", Volume: 15.95508},
# {CoinName: "GBP", Volume: 0},
# {CoinName: "USDC", Volume: 0} ],
# t: 1545196604371 }
#
tickers = self.safe_value(response, 'result', [])
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
async def fetch_ticker(self, symbol, params={}):
tickers = await self.fetch_tickers([symbol], params)
return tickers[symbol]
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.engineGetHt(self.extend(request, params))
#
# { symbol: "COSS_ETH",
# limit: 100,
# history: [{ id: 481321,
# price: "0.00065100",
# qty: "272.92000000",
# isBuyerMaker: False,
# time: 1545180845019 },
# { id: 481322,
# price: "0.00065200",
# qty: "1.90000000",
# isBuyerMaker: True,
# time: 1545180847535},
# ...
# { id: 481420,
# price: "0.00065300",
# qty: "2.00000000",
# isBuyerMaker: True,
# time: 1545181167702} ],
# time: 1545181171274 }
#
return self.parse_trades(response['history'], market, since, limit)
def parse_trade_fee(self, fee):
if fee is None:
return fee
parts = fee.split(' ')
numParts = len(parts)
cost = parts[0]
code = None
if numParts > 1:
code = self.safe_currency_code(parts[1])
return {
'cost': cost,
'currency': code,
}
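    # e.g. parse_trade_fee('0.00700000 COSS') returns
    # {'cost': '0.00700000', 'currency': 'COSS'} (the cost is kept as a string).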
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# { id: 481322,
# price: "0.00065200",
# qty: "1.90000000",
# isBuyerMaker: True,
# time: 1545180847535}
#
# fetchOrderTrades(private)
#
# [{ hex_id: null,
# symbol: "COSS_ETH",
# order_id: "ad6f6b47-3def-4add-a5d5-2549a9df1593",
# order_side: "BUY",
# price: "0.00065900",
# quantity: "10",
# fee: "0.00700000 COSS",
# additional_fee: "0.00000461 ETH",
# total: "0.00659000 ETH",
# timestamp: 1545152356075 }]
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_integer(trade, 'time')
orderId = self.safe_string(trade, 'order_id')
side = self.safe_string_lower(trade, 'order_side')
symbol = None
marketId = self.safe_string(trade, 'symbol')
if marketId is not None:
market = self.safe_value(self.markets_by_id, marketId, market)
if market is None:
baseId, quoteId = marketId.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
elif market is not None:
symbol = market['symbol']
cost = None
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'qty', 'quantity')
if amount is not None:
if price is not None:
cost = price * amount
result = {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
fee = self.parse_trade_fee(self.safe_string(trade, 'fee'))
if fee is not None:
additionalFee = self.parse_trade_fee(self.safe_string(trade, 'additional_fee'))
if additionalFee is None:
result['fee'] = fee
else:
result['fees'] = [
fee,
additionalFee,
]
return result
async def fetch_orders_by_type(self, type, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
# 'from_id': 'b2a2d379-f9b6-418b-9414-cbf8330b20d1', # string(uuid), fetchOrders(all orders) only
# 'page': 0, # different pagination in fetchOpenOrders and fetchClosedOrders
# 'limit': 50, # optional, max = default = 50
'symbol': market['id'], # required
}
if limit is not None:
request['limit'] = limit # max = default = 50
method = 'tradePostOrderList' + type
response = await getattr(self, method)(self.extend(request, params))
#
# fetchOrders, fetchClosedOrders
#
# [{ hex_id: "5c192784330fe51149f556bb",
# order_id: "5e46e1b1-93d5-4656-9b43-a5635b08eae9",
# account_id: "a0c20128-b9e0-484e-9bc8-b8bb86340e5b",
# order_symbol: "COSS_ETH",
# order_side: "BUY",
# status: "filled",
# createTime: 1545152388019,
# type: "limit",
# timeMatching: 0,
# order_price: "0.00065900",
# order_size: "10",
# executed: "10",
# stop_price: "0.00000000",
# avg: "0.00065900",
# total: "0.00659000 ETH" } ]
#
# fetchOpenOrders
#
# {
# "total": 2,
# "list": [
# {
# "order_id": "9e5ae4dd-3369-401d-81f5-dff985e1c4ty",
# "account_id": "9e5ae4dd-3369-401d-81f5-dff985e1c4a6",
# "order_symbol": "eth-btc",
# "order_side": "BUY",
# "status": "OPEN",
# "createTime": 1538114348750,
# "type": "limit",
# "order_price": "0.12345678",
# "order_size": "10.12345678",
# "executed": "0",
# "stop_price": "02.12345678",
# "avg": "1.12345678",
# "total": "2.12345678"
# }
# ]
# }
#
# the following code is to handle the above difference in response formats
orders = None
if isinstance(response, list):
orders = response
else:
orders = self.safe_value(response, 'list', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_type('All', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_type('Completed', symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_type('Open', symbol, since, limit, params)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'order_id': id,
}
response = await self.tradePostOrderDetails(self.extend(request, params))
return self.parse_order(response)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'order_id': id,
}
response = await self.tradePostOrderTradeDetail(self.extend(request, params))
#
# [{ hex_id: null,
# symbol: "COSS_ETH",
# order_id: "ad6f6b47-3def-4add-a5d5-2549a9df1593",
# order_side: "BUY",
# price: "0.00065900",
# quantity: "10",
# fee: "0.00700000 COSS",
# additional_fee: "0.00000461 ETH",
# total: "0.00659000 ETH",
# timestamp: 1545152356075 }]
#
return self.parse_trades(response, market, since, limit)
def parse_order_status(self, status):
if status is None:
return status
statuses = {
'OPEN': 'open',
'CANCELLED': 'canceled',
'FILLED': 'closed',
'PARTIAL_FILL': 'closed',
'CANCELLING': 'open',
}
return self.safe_string(statuses, status.upper(), status)
def parse_order(self, order, market=None):
#
# { hex_id: "5c192784330fe51149f556bb", # missing in fetchOpenOrders
# order_id: "5e46e1b1-93d5-4656-9b43-a5635b08eae9",
# account_id: "a0c20128-b9e0-484e-9bc8-b8bb86340e5b",
# order_symbol: "COSS_ETH", # coss-eth in docs
# order_side: "BUY",
# status: "filled",
# createTime: 1545152388019,
# type: "limit",
# timeMatching: 0, # missing in fetchOpenOrders
# order_price: "0.00065900",
# order_size: "10",
# executed: "10",
# stop_price: "0.00000000",
# avg: "0.00065900",
# total: "0.00659000 ETH" }
#
id = self.safe_string(order, 'order_id')
symbol = None
marketId = self.safe_string(order, 'order_symbol')
if marketId is None:
if market is not None:
symbol = market['symbol']
else:
# a minor workaround for lowercase eth-btc symbols
marketId = marketId.upper()
marketId = marketId.replace('-', '_')
market = self.safe_value(self.markets_by_id, marketId, market)
if market is None:
baseId, quoteId = marketId.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'createTime')
status = self.parse_order_status(self.safe_string(order, 'status'))
price = self.safe_float(order, 'order_price')
filled = self.safe_float(order, 'executed')
type = self.safe_string(order, 'type')
amount = self.safe_float(order, 'order_size')
remaining = None
if amount is not None:
if filled is not None:
remaining = amount - filled
average = self.safe_float(order, 'avg')
side = self.safe_string_lower(order, 'order_side')
cost = self.safe_float(order, 'total')
fee = None
trades = None
return {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': trades,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'order_symbol': market['id'],
'order_size': self.amount_to_precision(symbol, amount),
'order_side': side.upper(),
'type': type,
}
if price is not None:
request['order_price'] = self.price_to_precision(symbol, price)
response = await self.tradePostOrderAdd(self.extend(request, params))
#
# {
# "order_id": "9e5ae4dd-3369-401d-81f5-dff985e1c4ty",
# "account_id": "9e5ae4dd-3369-401d-81f5-dff985e1c4a6",
# "order_symbol": "eth-btc",
# "order_side": "BUY",
# "status": "OPEN",
# "createTime": 1538114348750,
# "type": "limit",
# "order_price": "0.12345678",
# "order_size": "10.12345678",
# "executed": "0",
# "stop_price": "02.12345678",
# "avg": "1.12345678",
# "total": "2.12345678"
# }
#
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'order_symbol': market['id'],
}
response = await self.tradeDeleteOrderCancel(self.extend(request, params))
#
# {order_symbol: "COSS_ETH",
# order_id: "30f2d698-39a0-4b9f-a3a6-a179542373bd",
# order_size: 0,
# account_id: "a0c20128-b9e0-484e-9bc8-b8bb86340e5b",
# timestamp: 1545202728814,
# recvWindow: null }
#
return self.parse_order(response)
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + path
if api == 'trade':
self.check_required_credentials()
timestamp = self.nonce()
query = self.extend({
'timestamp': timestamp, # required(int64)
# 'recvWindow': 10000, # optional(int32)
}, params)
request = None
if method == 'GET':
request = self.urlencode(query)
url += '?' + request
else:
request = self.json(query)
body = request
headers = {
'Signature': self.hmac(self.encode(request), self.encode(self.secret)),
'Authorization': self.apiKey,
}
else:
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
|
py | b41284b2fb996161b968374f6043bf0edd8057ea | import random
numbers = []  # avoid shadowing the built-in 'list'
for x in range(0, 20):
    numbers.append(random.randint(0, 1000))
print("\nPrinting total list: \n", numbers[:], end="\n\n")
print("\nPrinting after 5th index: (Excluding 5th index)\n", numbers[6:], end="\n\n")
print("\nPrinting before 6th index: (Excluding 6th index)\n", numbers[:6], end="\n\n")
print("\nPrinting between 2nd and 8th index: (Including 2nd and 8th) \n", numbers[2:9], end="\n\n")
|
py | b4128573022c5cd02a5e9932f9f5b37e68531562 | import numpy as np
day4_in = np.loadtxt('day4_input.txt', dtype=int, delimiter='-')
PART_ONE = False
def has_adjacent_digits(password):
password = str(password)
    for i in range(len(password) - 1):
if (password[i] == password[i+1]):
if PART_ONE:
return True
return check_larger_group(password, password[i], i)
return False
def has_increasing_digits(password):
password = str(password)
    for i in range(len(password) - 1):
if int(password[i]) > int(password[i+1]):
return False
return True
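# e.g. has_increasing_digits('123455') -> True, has_increasing_digits('223450') -> False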
def check_larger_group(password, match, match_pos):
    # Assumes non-decreasing digits (guaranteed by the caller's short-circuit
    # with has_increasing_digits), so equal digits always form a single run.
    if password.count(match) == 2:
        return True
    if password[match_pos+2] == match:
        j = match_pos
        while j < len(password):
            if password[j] == match:
                j += 1
            else:
                break
        remaining_password = password[:match_pos] + password[j:]
        return has_adjacent_digits(remaining_password)
    return False
valid_passwords = 0
for password in range(day4_in[0], day4_in[1]):
if has_increasing_digits(password) and has_adjacent_digits(password):
valid_passwords += 1
print(f"Range: {day4_in[0]} - {day4_in[1]}")
print(f"Valid Passwords: {valid_passwords}") |
py | b412859836f5b117f4c91fe6968759d74f47b89b | """RMP simulation"""
#from collections import deque
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anm
import scipy.integrate as integrate
import tqdm
import time
import environment
from kinematics import BaxterRobotArmKinematics
import rmp
class PositinData:
def __init__(self,):
self.x = []
self.y = []
self.z = []
def add(self, X):
self.x.append(X[0, 0])
self.y.append(X[1, 0])
self.z.append(X[2, 0])
class FrameData:
def __init__(self, arm):
self._get_joint_position(arm)
self._get_cpoints_position(arm)
def _get_joint_position(self, arm):
self.joint_positions_list = PositinData()
for o in arm.get_joint_positions():
self.joint_positions_list.add(o)
return
def _get_cpoints_position(self, arm):
self.cpoints_potisions_list = []
for cpoints in arm.cpoints_x:
_cs = PositinData()
for c in cpoints:
_cs.add(c)
self.cpoints_potisions_list.append(_cs)
return
class SimulationData:
def __init__(self,):
self.data = []
self.ee = PositinData()
self.command = []
pass
def add_data(self, arm, ddq=None):
self.data.append(FrameData(arm))
self.ee.add(arm.Ts_Wo[-1].o)
if ddq is not None:
self.command.append(ddq)
return
class Simulator:
""""""
def __init__(self, isLeft, TIME_SPAN, TIME_INTERVAL):
self.isLeft = isLeft
self.TIME_SPAN = TIME_SPAN
self.TIME_INTERVAL = TIME_INTERVAL
return
def set_controller(self, rmp_param):
"""rmpをセット"""
print('制御器セット中...')
start = time.time()
self.rmps = []
for i in range(8):
p = rmp_param[i]
if p['goal_attractor'] is None:
goal_attractor = None
elif p['goal_attractor']['name'] == 'Original':
goal_attractor = rmp.OriginalRMPAttractor(
**p['goal_attractor']
)
elif p['goal_attractor']['name'] == 'fromGDS':
goal_attractor = rmp.RMPfromGDSAttractor(
**p['goal_attractor']
)
if p['collision_avoidance'] is None:
collision_avoidance = None
elif p['collision_avoidance']['name'] == 'Original':
collision_avoidance = rmp.OriginalRMPCollisionAvoidance(
**p['collision_avoidance']
)
elif p['collision_avoidance']['name'] == 'fromGDS':
collision_avoidance = rmp.RMPfromGDSCollisionAvoidance(
**p['collision_avoidance']
)
self.rmps.append(
rmp.RMP(goal_attractor, collision_avoidance,)
)
p = rmp_param[-1]
if p['joint_limit_avoidance'] is None:
self.joint_limit_avoidance_RMP = None
elif p['joint_limit_avoidance']['name'] == 'Original':
self.joint_limit_avoidance_RMP = rmp.OriginalRMPJointLimitAvoidance(
**p['joint_limit_avoidance']
)
elif p['joint_limit_avoidance']['name'] == 'fromGDS':
self.joint_limit_avoidance_RMP = rmp.RMPfromGDSJointLimitAvoidance(
**p['joint_limit_avoidance']
)
        print('Setup done')
        print('Setup time = ', time.time() - start, '\n')
return
def set_environment(self, env_param):
"""目標,障害物をセット"""
print('環境セット中...')
start = time.time()
goal_param = env_param['goal']
obs_param = env_param['obstacle']
#print(goal_param)
self.gl_goal = environment.Goal(**goal_param).goal
#self.gl_goal = np.array([[0.3, -0.75, 1]]).T
self.obs = environment.set_obstacle(obs_param)
if self.obs is not None:
self.obs_plot = np.concatenate(self.obs, axis=1)
        print('Setup done')
        print('Setup time = ', time.time() - start, '\n')
return
def run_simulation(self,):
self.dobs = np.zeros((3, 1))
t = np.arange(0.0, self.TIME_SPAN, self.TIME_INTERVAL)
arm = BaxterRobotArmKinematics(self.isLeft)
def _eom(t, state):
"""scipyに渡すやつ"""
# 進捗報告(計算の無駄)
# if int(t/self.TIME_INTERVAL) % 100 == 0:
# print("t = ", '{:.2f}'.format(t))
print(t)
q = np.array([state[0:7]]).T
dq = np.array([state[7:14]]).T
            arm.update_all(q, dq)  # update all robot arm state
pulled_f_all = []
pulled_M_all = []
for i in range(8):
_rmp = self.rmps[i]
for x, dx, J, dJ, in zip(
arm.cpoints_x[i],
arm.cpoints_dx[i],
arm.Jos_cpoints[i],
arm.Jos_cpoints_diff_by_t[i],
):
if self.obs is not None and _rmp.collision_avoidance is not None:
for o in self.obs:
f, M = _rmp.collision_avoidance.get_natural(x, dx, o, self.dobs)
_pulled_f, _pulled_M = rmp.pullback(f, M, J, dJ, dq)
pulled_f_all.append(_pulled_f)
pulled_M_all.append(_pulled_M)
if _rmp.goal_attractor is not None:
f, M = _rmp.goal_attractor.get_natural(x, dx, self.gl_goal(t), self.dobs)
_pulled_f, _pulled_M = rmp.pullback(f, M, J, dJ, dq)
pulled_f_all.append(_pulled_f)
pulled_M_all.append(_pulled_M)
pulled_f_all = np.sum(pulled_f_all, axis=0)
pulled_M_all = np.sum(pulled_M_all, axis=0)
            # joint limits
if self.joint_limit_avoidance_RMP is not None:
f, M = self.joint_limit_avoidance_RMP.get_natural(q, dq, arm.q_max, arm.q_min)
pulled_f_all += f
pulled_M_all += M
ddq = np.linalg.pinv(pulled_M_all) @ pulled_f_all
dstate = np.concatenate([dq, ddq], axis=0)
dstate = np.ravel(dstate).tolist()
return dstate
        ### using scipy ###
        print("Running simulation...")
start = time.time()
self.sol = integrate.solve_ivp(
fun=_eom,
t_span=(0.0, self.TIME_SPAN),
y0=np.ravel(np.concatenate([arm.q, arm.dq])).tolist(),
method='RK45',
#method='LSODA',
t_eval=t,
)
print("シミュレーション実行終了")
print("シミュレーション実行時間 = ", time.time() - start)
print("")
        # ### using a hand-rolled Euler integrator ###
        # class JisakuSol:
        #     """mimics the solve_ivp result object"""
        #     y = [
        #         [], [], [], [], [], [], [], [], [], [], [], [], [], [],
        #     ]
        # print("Running simulation...")
        # start = time.time()
        # self.sol = JisakuSol()
        # state = np.ravel(np.concatenate([arm.q, arm.dq])).tolist()
        # for i in range(14):
        #     self.sol.y[i].append(state[i])
        # for i in range(int(self.TIME_SPAN / self.TIME_INTERVAL)):
        #     print(i)
        #     t = i * self.TIME_INTERVAL
        #     print(type(state))
        #     dstate = _eom(t, state)
        #     print(type(dstate))
        #     state = [state[j] + dstate[j]*self.TIME_INTERVAL for j in range(14)]
        #     for k in range(14):
        #         self.sol.y[k].append(state[k])
        # print("Simulation finished")
        # print("Simulation run time = ", time.time() - start)
        # print("")
        # build plotting data
        print("Building data...")
start = time.time()
self.data = SimulationData()
_arm = BaxterRobotArmKinematics(isLeft=True)
for i in tqdm.tqdm(range(len(self.sol.t))):
#for i in range(len(self.sol.t)):
q = np.array([
[self.sol.y[0][i]],
[self.sol.y[1][i]],
[self.sol.y[2][i]],
[self.sol.y[3][i]],
[self.sol.y[4][i]],
[self.sol.y[5][i]],
[self.sol.y[6][i]],
])
dq = np.array([
[self.sol.y[7][i]],
[self.sol.y[8][i]],
[self.sol.y[9][i]],
[self.sol.y[10][i]],
[self.sol.y[11][i]],
[self.sol.y[12][i]],
[self.sol.y[13][i]],
])
_arm.update_all(q, dq)
self.data.add_data(_arm, ddq=None)
print("データ作成完了")
print("データ作成時間 = ", time.time() - start)
print("")
return
def plot_animation_2(self,):
"""グラフ作成(遅いかも)"""
start = time.time()
print("plot実行中...")
# アニメーション
fig_ani = plt.figure()
ax = fig_ani.add_subplot(projection = '3d')
ax.grid(True)
ax.set_xlabel('X[m]')
ax.set_ylabel('Y[m]')
ax.set_zlabel('Z[m]')
        ## equalize the scale of the three axes
max_x = 1.0
min_x = -1.0
max_y = 0.2
min_y = -1.0
max_z = 2.0
min_z = 0.0
max_range = np.array([
max_x - min_x,
max_y - min_y,
max_z - min_z
]).max() * 0.5
mid_x = (max_x + min_x) * 0.5
mid_y = (max_y + min_y) * 0.5
mid_z = (max_z + min_z) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
        # time label
time_template = 'time = %s [s]'
ax.set_box_aspect((1,1,1))
def _update(i):
"""アニメーションの関数"""
t = i * self.TIME_INTERVAL
ax.cla() # 遅いかも
ax.grid(True)
ax.set_xlabel('X[m]')
ax.set_ylabel('Y[m]')
ax.set_zlabel('Z[m]')
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
            # goal point
ax.scatter(
self.gl_goal(t)[0, 0], self.gl_goal(t)[1, 0], self.gl_goal(t)[2, 0],
s = 100, label = 'goal point', marker = '*', color = '#ff7f00',
alpha = 1, linewidths = 1.5, edgecolors = 'red')
            # obstacle points
if self.obs is not None:
ax.scatter(
self.obs_plot[0, :], self.obs_plot[1, :], self.obs_plot[2, :],
label = 'obstacle point', marker = '.', color = 'k',)
d = self.data.data[i]
            # joint positions
ax.plot(
d.joint_positions_list.x,
d.joint_positions_list.y,
d.joint_positions_list.z,
"o-", color = "blue",
)
            # control points
for p in d.cpoints_potisions_list:
ax.scatter(
p.x, p.y, p.z,
marker='o'
)
            # gripper
ax.plot(
self.data.ee.x[0:i],
self.data.ee.y[0:i],
self.data.ee.z[0:i],
"-", color = "#ff7f00"
)
            # time label
ax.text(
0.8, 0.12, 0.01,
time_template % (i * self.TIME_INTERVAL), size = 10
)
ax.set_box_aspect((1,1,1))
return
ani = anm.FuncAnimation(
fig = fig_ani,
func = _update,
frames = len(self.data.data),
#frames = int(self.TIME_SPAN / self.TIME_INTERVAL),
interval = self.TIME_INTERVAL * 0.001
)
#ani.save("hoge.gif", fps=1/self.TIME_INTERVAL, writer='pillow')
print("plot実行終了")
print("実行時間 = ", time.time() - start)
        # # input history
# if len(self.data.command) != 1:
# fig_input = plt.figure()
# ax2 = fig_input.add_subplot(111)
# _c = np.concatenate(self.data.command, axis=1)
# for i in range(7):
# ax2.plot(_c[i, :], label=str(i+1))
# ax2.grid(True)
# ax2.legend()
        # final result
fig_rezult = plt.figure()
ax_rezult = fig_rezult.add_subplot(projection='3d')
ax_rezult.grid(True)
ax_rezult.set_xlabel('X[m]')
ax_rezult.set_ylabel('Y[m]')
ax_rezult.set_zlabel('Z[m]')
        ## equalize the scale of the three axes
max_x = 1.0
min_x = -1.0
max_y = 0.2
min_y = -1.0
max_z = 2.0
min_z = 0.0
max_range = np.array([
max_x - min_x,
max_y - min_y,
max_z - min_z
]).max() * 0.5
mid_x = (max_x + min_x) * 0.5
mid_y = (max_y + min_y) * 0.5
mid_z = (max_z + min_z) * 0.5
ax_rezult.set_xlim(mid_x - max_range, mid_x + max_range)
ax_rezult.set_ylim(mid_y - max_range, mid_y + max_range)
ax_rezult.set_zlim(mid_z - max_range, mid_z + max_range)
i = int(self.TIME_SPAN/self.TIME_INTERVAL)-1
t = self.TIME_SPAN
        # goal point
ax_rezult.scatter(
self.gl_goal(t)[0, 0], self.gl_goal(t)[1, 0], self.gl_goal(t)[2, 0],
s = 100, label = 'goal point', marker = '*', color = '#ff7f00',
alpha = 1, linewidths = 1.5, edgecolors = 'red')
        # obstacle points
if self.obs is not None:
ax_rezult.scatter(
self.obs_plot[0, :], self.obs_plot[1, :], self.obs_plot[2, :],
label = 'obstacle point', marker = '.', color = 'k',)
d = self.data.data[i]
# ジョイント位置
ax_rezult.plot(
d.joint_positions_list.x,
d.joint_positions_list.y,
d.joint_positions_list.z,
"o-", color = "blue",
)
# 制御点
for p in d.cpoints_potisions_list:
ax_rezult.scatter(
p.x, p.y, p.z,
marker='o'
)
# グリッパー
ax_rezult.plot(
self.data.ee.x[0:i],
self.data.ee.y[0:i],
self.data.ee.z[0:i],
"-", color = "#ff7f00"
)
# 時刻表示
ax_rezult.text(
0.8, 0.12, 0.01,
time_template % (i * self.TIME_INTERVAL), size = 10
)
ax_rezult.set_box_aspect((1,1,1))
plt.show()
return
def main():
    simu = Simulator()
    simu.run_simulation()
    simu.plot_animation_2()


if __name__ == "__main__":
    main()
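
# Refactoring sketch (not part of the original script): the axis-equalization
# block is duplicated above for the animation and final-result figures; a
# helper like this could replace both copies. The default limits mirror the
# hard-coded values used in plot_animation_2().
def _set_equal_3d_limits(ax, limits=((-1.0, 1.0), (-1.0, 0.2), (0.0, 2.0))):
    """Give a 3D axes an equal-aspect box covering the given (min, max) ranges."""
    (min_x, max_x), (min_y, max_y), (min_z, max_z) = limits
    max_range = max(max_x - min_x, max_y - min_y, max_z - min_z) * 0.5
    mid_x, mid_y, mid_z = (max_x + min_x) * 0.5, (max_y + min_y) * 0.5, (max_z + min_z) * 0.5
    ax.set_xlim(mid_x - max_range, mid_x + max_range)
    ax.set_ylim(mid_y - max_range, mid_y + max_range)
    ax.set_zlim(mid_z - max_range, mid_z + max_range)
    ax.set_box_aspect((1, 1, 1))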
|
py | b4128805915b4b63b24a0497b2b2328bf1cafc37 | # coding: utf-8

"""
    FlashArray REST API

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)

    OpenAPI spec version: 2.11
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import re

# python 2 and python 3 compatibility library
import six
from typing import List, Optional

from .. import models


class SMISApi(object):

    def __init__(self, api_client):
        self.api_client = api_client
    def api211_smi_s_get_with_http_info(
        self,
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        filter=None,  # type: str
        limit=None,  # type: int
        offset=None,  # type: int
        sort=None,  # type: List[str]
        total_item_count=None,  # type: bool
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.SmisGetResponse
        """List SMI-S settings

        Displays the SMI-S settings, including the name of the array and whether SLP and WBEM-HTTPS are enabled.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.api211_smi_s_get_with_http_info(async_req=True)
        >>> result = thread.get()

        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
        :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
        :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
        :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
        :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                                     It can also be a tuple of (connection time, read time) timeouts.
        :return: SmisGetResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        if sort is not None:
            if not isinstance(sort, list):
                sort = [sort]
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}

        # Convert the filter into a string
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]

        if 'limit' in params and params['limit'] < 1:
            raise ValueError("Invalid value for parameter `limit` when calling `api211_smi_s_get`, must be a value greater than or equal to `1`")
        if 'offset' in params and params['offset'] < 0:
            raise ValueError("Invalid value for parameter `offset` when calling `api211_smi_s_get`, must be a value greater than or equal to `0`")
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'filter' in params:
            query_params.append(('filter', params['filter']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'offset' in params:
            query_params.append(('offset', params['offset']))
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
            collection_formats['sort'] = 'csv'
        if 'total_item_count' in params:
            query_params.append(('total_item_count', params['total_item_count']))
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        return self.api_client.call_api(
            '/api/2.11/smi-s', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SmisGetResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
    def api211_smi_s_patch_with_http_info(
        self,
        smi_s=None,  # type: models.Smis
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.SmisResponse
        """Modify SLP and SMI-S

        Modifies the Service Location Protocol (SLP) and the SMI-S provider, enabling or disabling them.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.api211_smi_s_patch_with_http_info(smi_s, async_req=True)
        >>> result = thread.get()

        :param Smis smi_s: (required)
        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                                     It can also be a tuple of (connection time, read time) timeouts.
        :return: SmisResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}

        # Convert the filter into a string (a no-op here: this endpoint takes
        # no `filter` or `sort` parameters; the block is boilerplate shared by
        # the code generator)
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]
        # verify the required parameter 'smi_s' is set
        if smi_s is None:
            raise TypeError("Missing the required parameter `smi_s` when calling `api211_smi_s_patch`")
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']
        form_params = []
        local_var_files = {}
        body_params = None
        if 'smi_s' in params:
            body_params = params['smi_s']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        return self.api_client.call_api(
            '/api/2.11/smi-s', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SmisResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
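
# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated client). It assumes `api_client` is
# an already-configured client object exposing the same `call_api` and header
# helpers used above; the parameter values are illustrative only.
# ---------------------------------------------------------------------------
def _example_list_smis_settings(api_client):
    """Hedged example: list SMI-S settings, ten per page, sorted by name."""
    api = SMISApi(api_client)
    # For the PATCH endpoint a models.Smis body is required, e.g.
    # api.api211_smi_s_patch_with_http_info(smi_s=models.Smis(...)); the Smis
    # field names are not shown in this file, so none are assumed here.
    return api.api211_smi_s_get_with_http_info(
        sort=['name'],          # ascending; prefix '-' for descending order
        limit=10,               # page size; must be >= 1 per the check above
        total_item_count=True,  # also report the total number of objects
    )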
|
py | b4128852d583749c605bf78898f93efa49c21258 | # coding: utf-8

"""
    Kubernetes

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)

    OpenAPI spec version: v1.11.3
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import os
import sys
import unittest

import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_certificate_signing_request_spec import V1beta1CertificateSigningRequestSpec


class TestV1beta1CertificateSigningRequestSpec(unittest.TestCase):
    """ V1beta1CertificateSigningRequestSpec unit test stubs """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testV1beta1CertificateSigningRequestSpec(self):
        """
        Test V1beta1CertificateSigningRequestSpec
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1beta1_certificate_signing_request_spec.V1beta1CertificateSigningRequestSpec()
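        # A hedged sketch of the construction the FIXME asks for (assumption:
        # `request` carries a base64-encoded PEM CSR and is the only mandatory
        # attribute of this model; the `usages` values are illustrative):
        #   model = V1beta1CertificateSigningRequestSpec(
        #       request='BASE64-ENCODED-CSR',
        #       usages=['digital signature', 'key encipherment'],
        #   )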
        pass


if __name__ == '__main__':
    unittest.main()
|
py | b41288f2beb74089c4ac238a7a45875f5f19a5a8 | # coding: utf-8

"""
    SimScale API

    The version of the OpenAPI document: 0.0.0
    Generated by: https://openapi-generator.tech
"""

import pprint
import re  # noqa: F401

import six

from simscale_sdk.configuration import Configuration


class ReportResponseFailureReason(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'severity': 'str',
        'code': 'str',
        'message': 'str',
        'details': 'dict(str, object)'
    }

    attribute_map = {
        'severity': 'severity',
        'code': 'code',
        'message': 'message',
        'details': 'details'
    }
    def __init__(self, severity=None, code=None, message=None, details=None, local_vars_configuration=None):  # noqa: E501
        """ReportResponseFailureReason - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._severity = None
        self._code = None
        self._message = None
        self._details = None
        self.discriminator = None

        if severity is not None:
            self.severity = severity
        if code is not None:
            self.code = code
        if message is not None:
            self.message = message
        if details is not None:
            self.details = details
    @property
    def severity(self):
        """Gets the severity of this ReportResponseFailureReason.  # noqa: E501

        :return: The severity of this ReportResponseFailureReason.  # noqa: E501
        :rtype: str
        """
        return self._severity

    @severity.setter
    def severity(self, severity):
        """Sets the severity of this ReportResponseFailureReason.

        :param severity: The severity of this ReportResponseFailureReason.  # noqa: E501
        :type: str
        """
        allowed_values = ["INFO", "SUCCESS", "WARNING", "ERROR"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and severity not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `severity` ({0}), must be one of {1}"  # noqa: E501
                .format(severity, allowed_values)
            )

        self._severity = severity

    @property
    def code(self):
        """Gets the code of this ReportResponseFailureReason.  # noqa: E501

        Code for e.g. programmatic handling of error conditions.  # noqa: E501

        :return: The code of this ReportResponseFailureReason.  # noqa: E501
        :rtype: str
        """
        return self._code

    @code.setter
    def code(self, code):
        """Sets the code of this ReportResponseFailureReason.

        Code for e.g. programmatic handling of error conditions.  # noqa: E501

        :param code: The code of this ReportResponseFailureReason.  # noqa: E501
        :type: str
        """
        self._code = code

    @property
    def message(self):
        """Gets the message of this ReportResponseFailureReason.  # noqa: E501

        Human-readable description of the entry.  # noqa: E501

        :return: The message of this ReportResponseFailureReason.  # noqa: E501
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this ReportResponseFailureReason.

        Human-readable description of the entry.  # noqa: E501

        :param message: The message of this ReportResponseFailureReason.  # noqa: E501
        :type: str
        """
        self._message = message

    @property
    def details(self):
        """Gets the details of this ReportResponseFailureReason.  # noqa: E501

        Additional data to interpret and handle the entry.  # noqa: E501

        :return: The details of this ReportResponseFailureReason.  # noqa: E501
        :rtype: dict(str, object)
        """
        return self._details

    @details.setter
    def details(self, details):
        """Sets the details of this ReportResponseFailureReason.

        Additional data to interpret and handle the entry.  # noqa: E501

        :param details: The details of this ReportResponseFailureReason.  # noqa: E501
        :type: dict(str, object)
        """
        self._details = details
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ReportResponseFailureReason):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ReportResponseFailureReason):
            return True

        return self.to_dict() != other.to_dict()
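
# Usage sketch (not part of the generated model): build a failure reason and
# round-trip it through to_dict(). `severity` must be one of the values the
# setter above validates; the code string is a made-up example value.
if __name__ == "__main__":
    reason = ReportResponseFailureReason(
        severity="ERROR",
        code="REPORT_RENDER_FAILED",  # hypothetical code value
        message="Rendering the report failed.",
        details={"retryable": False},
    )
    print(reason.to_dict())
    # -> {'severity': 'ERROR', 'code': 'REPORT_RENDER_FAILED', ...}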
|