# halfcrazy/sqlalchemy: test/orm/inheritance/test_basic.py
import warnings
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy import exc as sa_exc, util, event
from sqlalchemy.orm import *
from sqlalchemy.orm.util import instance_str
from sqlalchemy.orm import exc as orm_exc, attributes
from sqlalchemy.testing.assertsql import AllOf, CompiledSQL, Or
from sqlalchemy.sql import table, column
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy import inspect
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.testing.util import gc_collect
class O2MTest(fixtures.MappedTest):
"""deals with inheritance and one-to-many relationships"""
@classmethod
def define_tables(cls, metadata):
global foo, bar, blub
foo = Table('foo', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(20)))
bar = Table('bar', metadata,
Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
Column('bar_data', String(20)))
blub = Table('blub', metadata,
Column('id', Integer, ForeignKey('bar.id'), primary_key=True),
Column('foo_id', Integer, ForeignKey('foo.id'), nullable=False),
Column('blub_data', String(20)))
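# three-level joined-table hierarchy: blub inherits bar, which inherits foo;
# blub.foo_id additionally supports the separate 'parent_foo' many-to-one used in test_basic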
def test_basic(self):
class Foo(object):
def __init__(self, data=None):
self.data = data
def __repr__(self):
return "Foo id %d, data %s" % (self.id, self.data)
mapper(Foo, foo)
class Bar(Foo):
def __repr__(self):
return "Bar id %d, data %s" % (self.id, self.data)
mapper(Bar, bar, inherits=Foo)
class Blub(Bar):
def __repr__(self):
return "Blub id %d, data %s" % (self.id, self.data)
mapper(Blub, blub, inherits=Bar, properties={
'parent_foo':relationship(Foo)
})
sess = create_session()
b1 = Blub("blub #1")
b2 = Blub("blub #2")
f = Foo("foo #1")
sess.add(b1)
sess.add(b2)
sess.add(f)
b1.parent_foo = f
b2.parent_foo = f
sess.flush()
compare = ','.join([repr(b1), repr(b2), repr(b1.parent_foo),
repr(b2.parent_foo)])
sess.expunge_all()
l = sess.query(Blub).all()
result = ','.join([repr(l[0]), repr(l[1]),
repr(l[0].parent_foo), repr(l[1].parent_foo)])
eq_(compare, result)
eq_(l[0].parent_foo.data, 'foo #1')
eq_(l[1].parent_foo.data, 'foo #1')
class PolymorphicResolutionMultiLevel(fixtures.DeclarativeMappedTest,
testing.AssertsCompiledSQL):
run_setup_mappers = 'once'
__dialect__ = 'default'
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
class B(A):
__tablename__ = 'b'
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
class C(A):
__tablename__ = 'c'
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
class D(B):
__tablename__ = 'd'
id = Column(Integer, ForeignKey('b.id'), primary_key=True)
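# hierarchy: B and C each inherit from A; D inherits from B. the tests below exercise
# Mapper._mappers_from_spec(), which resolves a with_polymorphic spec into an ordered list of mappers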
def test_ordered_b_d(self):
a_mapper = inspect(self.classes.A)
eq_(
a_mapper._mappers_from_spec(
[self.classes.B, self.classes.D], None),
[a_mapper, inspect(self.classes.B), inspect(self.classes.D)]
)
def test_a(self):
a_mapper = inspect(self.classes.A)
eq_(
a_mapper._mappers_from_spec(
[self.classes.A], None),
[a_mapper]
)
def test_b_d_selectable(self):
a_mapper = inspect(self.classes.A)
spec = [self.classes.D, self.classes.B]
eq_(
a_mapper._mappers_from_spec(
spec,
self.classes.B.__table__.join(self.classes.D.__table__)
),
[inspect(self.classes.B), inspect(self.classes.D)]
)
def test_d_selectable(self):
a_mapper = inspect(self.classes.A)
spec = [self.classes.D]
eq_(
a_mapper._mappers_from_spec(
spec,
self.classes.B.__table__.join(self.classes.D.__table__)
),
[inspect(self.classes.D)]
)
def test_reverse_d_b(self):
a_mapper = inspect(self.classes.A)
spec = [self.classes.D, self.classes.B]
eq_(
a_mapper._mappers_from_spec(
spec, None),
[a_mapper, inspect(self.classes.B), inspect(self.classes.D)]
)
mappers, selectable = a_mapper._with_polymorphic_args(spec=spec)
self.assert_compile(selectable,
"a LEFT OUTER JOIN b ON a.id = b.id "
"LEFT OUTER JOIN d ON b.id = d.id")
def test_d_b_missing(self):
a_mapper = inspect(self.classes.A)
spec = [self.classes.D]
eq_(
a_mapper._mappers_from_spec(
spec, None),
[a_mapper, inspect(self.classes.B), inspect(self.classes.D)]
)
mappers, selectable = a_mapper._with_polymorphic_args(spec=spec)
self.assert_compile(selectable,
"a LEFT OUTER JOIN b ON a.id = b.id "
"LEFT OUTER JOIN d ON b.id = d.id")
def test_d_c_b(self):
a_mapper = inspect(self.classes.A)
spec = [self.classes.D, self.classes.C, self.classes.B]
ms = a_mapper._mappers_from_spec(spec, None)
eq_(
ms[-1], inspect(self.classes.D)
)
eq_(ms[0], a_mapper)
eq_(
set(ms[1:3]), set(a_mapper._inheriting_mappers)
)
class PolymorphicOnNotLocalTest(fixtures.MappedTest):
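"""test use of polymorphic_on against columns and expressions that are not
local to the mapped table, including the error conditions that result."""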
@classmethod
def define_tables(cls, metadata):
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('x', String(10)),
Column('q', String(10)))
t2 = Table('t2', metadata,
Column('t2id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('y', String(10)),
Column('xid', ForeignKey('t1.id')))
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Child(Parent):
pass
def test_non_col_polymorphic_on(self):
Parent = self.classes.Parent
t2 = self.tables.t2
assert_raises_message(
sa_exc.ArgumentError,
"Can't determine polymorphic_on "
"value 'im not a column' - no "
"attribute is mapped to this name.",
mapper,
Parent, t2, polymorphic_on="im not a column"
)
def test_polymorphic_on_non_expr_prop(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
def go():
interface_m = mapper(Parent, t2,
polymorphic_on=lambda:"hi",
polymorphic_identity=0)
assert_raises_message(
sa_exc.ArgumentError,
"Only direct column-mapped property or "
"SQL expression can be passed for polymorphic_on",
go
)
def test_polymorphic_on_not_present_col(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
def go():
t1t2_join_2 = select([t1.c.q], from_obj=[t1.join(t2)]).alias()
interface_m = mapper(Parent, t2,
polymorphic_on=t1t2_join.c.x,
with_polymorphic=('*', t1t2_join_2),
polymorphic_identity=0)
assert_raises_message(
sa_exc.InvalidRequestError,
"Could not map polymorphic_on column 'x' to the mapped table - "
"polymorphic loads will not function properly",
go
)
def test_polymorphic_on_only_in_with_poly(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
# if it's in the with_polymorphic, then it's OK
mapper(Parent, t2,
polymorphic_on=t1t2_join.c.x,
with_polymorphic=('*', t1t2_join),
polymorphic_identity=0)
def test_polymorphic_on_not_in_with_poly(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
# if with_polymorphic is used but the column is not present in it, not OK
def go():
t1t2_join_2 = select([t1.c.q], from_obj=[t1.join(t2)]).alias()
interface_m = mapper(Parent, t2,
polymorphic_on=t1t2_join.c.x,
with_polymorphic=('*', t1t2_join_2),
polymorphic_identity=0)
assert_raises_message(
sa_exc.InvalidRequestError,
"Could not map polymorphic_on column 'x' "
"to the mapped table - "
"polymorphic loads will not function properly",
go
)
def test_polymorphic_on_expr_explicit_map(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
mapper(Parent, t1, properties={
"discriminator":column_property(expr)
}, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, t2, inherits=Parent,
polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_no_label_joined(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
mapper(Parent, t1, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, t2, inherits=Parent, polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_w_label_joined(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
]).label(None)
mapper(Parent, t1, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, t2, inherits=Parent, polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_no_label_single(self):
"""test that single_table_criterion is propagated
with a standalone expr"""
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
mapper(Parent, t1, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, inherits=Parent, polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_w_label_single(self):
"""test that single_table_criterion is propagated
with a standalone expr"""
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
]).label(None)
mapper(Parent, t1, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, inherits=Parent, polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_column_prop(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
cprop = column_property(expr)
mapper(Parent, t1, properties={
"discriminator":cprop
}, polymorphic_identity="parent",
polymorphic_on=cprop)
mapper(Child, t2, inherits=Parent,
polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_column_str_prop(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
cprop = column_property(expr)
mapper(Parent, t1, properties={
"discriminator":cprop
}, polymorphic_identity="parent",
polymorphic_on="discriminator")
mapper(Child, t2, inherits=Parent,
polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_synonym(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
cprop = column_property(t1.c.x)
assert_raises_message(
sa_exc.ArgumentError,
"Only direct column-mapped property or "
"SQL expression can be passed for polymorphic_on",
mapper, Parent, t1, properties={
"discriminator":cprop,
"discrim_syn":synonym(cprop)
}, polymorphic_identity="parent",
polymorphic_on="discrim_syn")
def _roundtrip(self, set_event=True, parent_ident='parent', child_ident='child'):
Parent, Child = self.classes.Parent, self.classes.Child
if set_event:
@event.listens_for(Parent, "init", propagate=True)
def set_identity(instance, *arg, **kw):
ident = object_mapper(instance).polymorphic_identity
if ident == 'parent':
instance.x = parent_ident
elif ident == 'child':
instance.x = child_ident
else:
assert False, "Got unexpected identity %r" % ident
s = Session(testing.db)
s.add_all([
Parent(q="p1"),
Child(q="c1", y="c1"),
Parent(q="p2"),
])
s.commit()
s.close()
eq_(
[type(t) for t in s.query(Parent).order_by(Parent.id)],
[Parent, Child, Parent]
)
eq_(
[type(t) for t in s.query(Child).all()],
[Child]
)
class SortOnlyOnImportantFKsTest(fixtures.MappedTest):
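"""test that a flush succeeds when the base table also carries an unrelated
foreign key cycle back to the subclass table; only the inheritance FK should
determine the table sort order."""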
@classmethod
def define_tables(cls, metadata):
Table('a', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('b_id', Integer,
ForeignKey('b.id', use_alter=True, name='b'))
)
Table('b', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True)
)
@classmethod
def setup_classes(cls):
Base = declarative_base()
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
b_id = Column(Integer, ForeignKey('b.id'))
class B(A):
__tablename__ = "b"
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
__mapper_args__ = {'inherit_condition': id == A.id}
cls.classes.A = A
cls.classes.B = B
def test_flush(self):
s = Session(testing.db)
s.add(self.classes.B())
s.flush()
class FalseDiscriminatorTest(fixtures.MappedTest):
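"""test that a polymorphic_identity of False (a boolean discriminator) is
honored and not treated as an unset value."""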
@classmethod
def define_tables(cls, metadata):
global t1
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('type', Boolean, nullable=False))
def test_false_on_sub(self):
class Foo(object):
pass
class Bar(Foo):
pass
mapper(Foo, t1, polymorphic_on=t1.c.type, polymorphic_identity=True)
mapper(Bar, inherits=Foo, polymorphic_identity=False)
sess = create_session()
b1 = Bar()
sess.add(b1)
sess.flush()
assert b1.type is False
sess.expunge_all()
assert isinstance(sess.query(Foo).one(), Bar)
def test_false_on_base(self):
class Ding(object):pass
class Bat(Ding):pass
mapper(Ding, t1, polymorphic_on=t1.c.type, polymorphic_identity=False)
mapper(Bat, inherits=Ding, polymorphic_identity=True)
sess = create_session()
d1 = Ding()
sess.add(d1)
sess.flush()
assert d1.type is False
sess.expunge_all()
assert sess.query(Ding).one() is not None
class PolymorphicSynonymTest(fixtures.MappedTest):
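"""test that a synonym established with map_column=True works with
joined-table inheritance and polymorphic loading."""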
@classmethod
def define_tables(cls, metadata):
global t1, t2
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10), nullable=False),
Column('info', String(255)))
t2 = Table('t2', metadata,
Column('id', Integer, ForeignKey('t1.id'),
primary_key=True),
Column('data', String(10), nullable=False))
def test_polymorphic_synonym(self):
class T1(fixtures.ComparableEntity):
def info(self):
return "THE INFO IS:" + self._info
def _set_info(self, x):
self._info = x
info = property(info, _set_info)
class T2(T1):pass
mapper(T1, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1',
properties={
'info':synonym('_info', map_column=True)
})
mapper(T2, t2, inherits=T1, polymorphic_identity='t2')
sess = create_session()
at1 = T1(info='at1')
at2 = T2(info='at2', data='t2 data')
sess.add(at1)
sess.add(at2)
sess.flush()
sess.expunge_all()
eq_(sess.query(T2).filter(T2.info=='at2').one(), at2)
eq_(at2.info, "THE INFO IS:at2")
class PolymorphicAttributeManagementTest(fixtures.MappedTest):
"""Test polymorphic_on can be assigned, can be mirrored, etc."""
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
Table('table_a', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('class_name', String(50))
)
Table('table_b', metadata,
Column('id', Integer, ForeignKey('table_a.id'),
primary_key=True),
Column('class_name', String(50)),
)
Table('table_c', metadata,
Column('id', Integer, ForeignKey('table_b.id'),
primary_key=True),
Column('data', String(10))
)
@classmethod
def setup_classes(cls):
table_b, table_c, table_a = (cls.tables.table_b,
cls.tables.table_c,
cls.tables.table_a)
class A(cls.Basic):
pass
class B(A):
pass
class C(B):
pass
class D(B):
pass
mapper(A, table_a,
polymorphic_on=table_a.c.class_name,
polymorphic_identity='a')
mapper(B, table_b, inherits=A,
polymorphic_on=table_b.c.class_name,
polymorphic_identity='b',
properties=dict(class_name=[table_a.c.class_name, table_b.c.class_name]))
mapper(C, table_c, inherits=B,
polymorphic_identity='c')
mapper(D, inherits=B,
polymorphic_identity='d')
def test_poly_configured_immediate(self):
A, C, B = (self.classes.A,
self.classes.C,
self.classes.B)
a = A()
b = B()
c = C()
eq_(a.class_name, 'a')
eq_(b.class_name, 'b')
eq_(c.class_name, 'c')
def test_base_class(self):
A, C, B = (self.classes.A,
self.classes.C,
self.classes.B)
sess = Session()
c1 = C()
sess.add(c1)
sess.commit()
assert isinstance(sess.query(B).first(), C)
sess.close()
assert isinstance(sess.query(A).first(), C)
def test_valid_assignment_upwards(self):
"""test that we can assign 'd' to a B, since B/D
both involve the same set of tables.
"""
D, B = self.classes.D, self.classes.B
sess = Session()
b1 = B()
b1.class_name = 'd'
sess.add(b1)
sess.commit()
sess.close()
assert isinstance(sess.query(B).first(), D)
def test_invalid_assignment_downwards(self):
"""test that we warn on assign of 'b' to a C, since this adds
a row to the C table we'd never load.
"""
C = self.classes.C
sess = Session()
c1 = C()
c1.class_name = 'b'
sess.add(c1)
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'b'; the object may not "
"refresh and/or load correctly" % instance_str(c1),
sess.flush
)
def test_invalid_assignment_upwards(self):
"""test that we warn on assign of 'c' to a B, since we will have a
"C" row that has no joined row, which will cause object
deleted errors.
"""
B = self.classes.B
sess = Session()
b1 = B()
b1.class_name = 'c'
sess.add(b1)
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'c'; the object may not "
"refresh and/or load correctly" % instance_str(b1),
sess.flush
)
def test_entirely_oob_assignment(self):
"""test warn on an unknown polymorphic identity.
"""
B = self.classes.B
sess = Session()
b1 = B()
b1.class_name = 'xyz'
sess.add(b1)
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'xyz'; the object may not "
"refresh and/or load correctly" % instance_str(b1),
sess.flush
)
def test_not_set_on_update(self):
C = self.classes.C
sess = Session()
c1 = C()
sess.add(c1)
sess.commit()
sess.expire(c1)
c1.data = 'foo'
sess.flush()
def test_validate_on_update(self):
C = self.classes.C
sess = Session()
c1 = C()
sess.add(c1)
sess.commit()
sess.expire(c1)
c1.class_name = 'b'
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'b'; the object may not "
"refresh and/or load correctly" % instance_str(c1),
sess.flush
)
class CascadeTest(fixtures.MappedTest):
"""that cascades on polymorphic relationships continue
cascading along the path of the instance's mapper, not
the base mapper."""
@classmethod
def define_tables(cls, metadata):
global t1, t2, t3, t4
t1= Table('t1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30))
)
t2 = Table('t2', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('t1id', Integer, ForeignKey('t1.id')),
Column('type', String(30)),
Column('data', String(30))
)
t3 = Table('t3', metadata,
Column('id', Integer, ForeignKey('t2.id'),
primary_key=True),
Column('moredata', String(30)))
t4 = Table('t4', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('t3id', Integer, ForeignKey('t3.id')),
Column('data', String(30)))
def test_cascade(self):
class T1(fixtures.BasicEntity):
pass
class T2(fixtures.BasicEntity):
pass
class T3(T2):
pass
class T4(fixtures.BasicEntity):
pass
mapper(T1, t1, properties={
't2s':relationship(T2, cascade="all")
})
mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity='t2')
mapper(T3, t3, inherits=T2, polymorphic_identity='t3', properties={
't4s':relationship(T4, cascade="all")
})
mapper(T4, t4)
sess = create_session()
t1_1 = T1(data='t1')
t3_1 = T3(data ='t3', moredata='t3')
t2_1 = T2(data='t2')
t1_1.t2s.append(t2_1)
t1_1.t2s.append(t3_1)
t4_1 = T4(data='t4')
t3_1.t4s.append(t4_1)
sess.add(t1_1)
assert t4_1 in sess.new
sess.flush()
sess.delete(t1_1)
assert t4_1 in sess.deleted
sess.flush()
class M2OUseGetTest(fixtures.MappedTest):
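"""test that a many-to-one referencing a joined-inheritance subclass can
still use the identity-map "get" optimization for its lazy load."""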
@classmethod
def define_tables(cls, metadata):
Table('base', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('type', String(30))
)
Table('sub', metadata,
Column('id', Integer, ForeignKey('base.id'), primary_key=True),
)
Table('related', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('sub_id', Integer, ForeignKey('sub.id')),
)
def test_use_get(self):
base, sub, related = (self.tables.base,
self.tables.sub,
self.tables.related)
# test [ticket:1186]
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
class Related(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='b')
mapper(Sub, sub, inherits=Base, polymorphic_identity='s')
mapper(Related, related, properties={
# previously, this was needed for the comparison to occur:
# the 'primaryjoin' looks just like "Sub"'s "get" clause (based on the Base id),
# and foreign_keys since that join condition doesn't actually have any fks in it
#'sub':relationship(Sub, primaryjoin=base.c.id==related.c.sub_id, foreign_keys=related.c.sub_id)
# now we can use this:
'sub':relationship(Sub)
})
assert class_mapper(Related).get_property('sub').strategy.use_get
sess = create_session()
s1 = Sub()
r1 = Related(sub=s1)
sess.add(r1)
sess.flush()
sess.expunge_all()
r1 = sess.query(Related).first()
s1 = sess.query(Sub).first()
def go():
assert r1.sub
self.assert_sql_count(testing.db, go, 0)
class GetTest(fixtures.MappedTest):
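"""test Query.get() against both polymorphic and non-polymorphic
joined-table hierarchies."""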
@classmethod
def define_tables(cls, metadata):
global foo, bar, blub
foo = Table('foo', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('type', String(30)),
Column('data', String(20)))
bar = Table('bar', metadata,
Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
Column('bar_data', String(20)))
blub = Table('blub', metadata,
Column('blub_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('foo_id', Integer, ForeignKey('foo.id')),
Column('bar_id', Integer, ForeignKey('bar.id')),
Column('blub_data', String(20)))
@classmethod
def setup_classes(cls):
class Foo(cls.Basic):
pass
class Bar(Foo):
pass
class Blub(Bar):
pass
def test_get_polymorphic(self):
self._do_get_test(True)
def test_get_nonpolymorphic(self):
self._do_get_test(False)
def _do_get_test(self, polymorphic):
foo, Bar, Blub, blub, bar, Foo = (self.tables.foo,
self.classes.Bar,
self.classes.Blub,
self.tables.blub,
self.tables.bar,
self.classes.Foo)
if polymorphic:
mapper(Foo, foo, polymorphic_on=foo.c.type, polymorphic_identity='foo')
mapper(Bar, bar, inherits=Foo, polymorphic_identity='bar')
mapper(Blub, blub, inherits=Bar, polymorphic_identity='blub')
else:
mapper(Foo, foo)
mapper(Bar, bar, inherits=Foo)
mapper(Blub, blub, inherits=Bar)
sess = create_session()
f = Foo()
b = Bar()
bl = Blub()
sess.add(f)
sess.add(b)
sess.add(bl)
sess.flush()
if polymorphic:
def go():
assert sess.query(Foo).get(f.id) is f
assert sess.query(Foo).get(b.id) is b
assert sess.query(Foo).get(bl.id) is bl
assert sess.query(Bar).get(b.id) is b
assert sess.query(Bar).get(bl.id) is bl
assert sess.query(Blub).get(bl.id) is bl
# test class mismatches - item is present
# in the identity map but we requested a subclass
assert sess.query(Blub).get(f.id) is None
assert sess.query(Blub).get(b.id) is None
assert sess.query(Bar).get(f.id) is None
self.assert_sql_count(testing.db, go, 0)
else:
# this is testing the 'wrong' behavior of using get()
# polymorphically with mappers that are not configured to be
# polymorphic. the important part being that get() always
# returns an instance of the query's type.
def go():
assert sess.query(Foo).get(f.id) is f
bb = sess.query(Foo).get(b.id)
assert isinstance(bb, Foo) and bb.id==b.id
bll = sess.query(Foo).get(bl.id)
assert isinstance(bll, Foo) and bll.id==bl.id
assert sess.query(Bar).get(b.id) is b
bll = sess.query(Bar).get(bl.id)
assert isinstance(bll, Bar) and bll.id == bl.id
assert sess.query(Blub).get(bl.id) is bl
self.assert_sql_count(testing.db, go, 3)
class EagerLazyTest(fixtures.MappedTest):
"""tests eager load/lazy load of child items off inheritance mappers, tests that
LazyLoader constructs the right query condition."""
@classmethod
def define_tables(cls, metadata):
global foo, bar, bar_foo
foo = Table('foo', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)))
bar = Table('bar', metadata,
Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
Column('bar_data', String(30)))
bar_foo = Table('bar_foo', metadata,
Column('bar_id', Integer, ForeignKey('bar.id')),
Column('foo_id', Integer, ForeignKey('foo.id'))
)
def test_basic(self):
class Foo(object): pass
class Bar(Foo): pass
foos = mapper(Foo, foo)
bars = mapper(Bar, bar, inherits=foos)
bars.add_property('lazy', relationship(foos, bar_foo, lazy='select'))
bars.add_property('eager', relationship(foos, bar_foo, lazy='joined'))
foo.insert().execute(data='foo1')
bar.insert().execute(id=1, data='bar1')
foo.insert().execute(data='foo2')
bar.insert().execute(id=2, data='bar2')
foo.insert().execute(data='foo3') #3
foo.insert().execute(data='foo4') #4
bar_foo.insert().execute(bar_id=1, foo_id=3)
bar_foo.insert().execute(bar_id=2, foo_id=4)
sess = create_session()
q = sess.query(Bar)
self.assert_(len(q.first().lazy) == 1)
self.assert_(len(q.first().eager) == 1)
class EagerTargetingTest(fixtures.MappedTest):
"""test a scenario where joined table inheritance might be
confused with an eagerly loaded joined table."""
@classmethod
def define_tables(cls, metadata):
Table('a_table', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('type', String(30), nullable=False),
Column('parent_id', Integer, ForeignKey('a_table.id'))
)
Table('b_table', metadata,
Column('id', Integer, ForeignKey('a_table.id'), primary_key=True),
Column('b_data', String(50)),
)
def test_adapt_stringency(self):
b_table, a_table = self.tables.b_table, self.tables.a_table
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
mapper(A, a_table, polymorphic_on=a_table.c.type, polymorphic_identity='A',
properties={
'children': relationship(A, order_by=a_table.c.name)
})
mapper(B, b_table, inherits=A, polymorphic_identity='B', properties={
'b_derived':column_property(b_table.c.b_data + "DATA")
})
sess=create_session()
b1=B(id=1, name='b1',b_data='i')
sess.add(b1)
sess.flush()
b2=B(id=2, name='b2', b_data='l', parent_id=1)
sess.add(b2)
sess.flush()
bid=b1.id
sess.expunge_all()
node = sess.query(B).filter(B.id==bid).all()[0]
eq_(node, B(id=1, name='b1',b_data='i'))
eq_(node.children[0], B(id=2, name='b2',b_data='l'))
sess.expunge_all()
node = sess.query(B).options(joinedload(B.children)).filter(B.id==bid).all()[0]
eq_(node, B(id=1, name='b1',b_data='i'))
eq_(node.children[0], B(id=2, name='b2',b_data='l'))
class FlushTest(fixtures.MappedTest):
"""test dependency sorting among inheriting mappers"""
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('email', String(128)),
Column('password', String(16)),
)
Table('roles', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('description', String(32))
)
Table('user_roles', metadata,
Column('user_id', Integer, ForeignKey('users.id'), primary_key=True),
Column('role_id', Integer, ForeignKey('roles.id'), primary_key=True)
)
Table('admins', metadata,
Column('admin_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('user_id', Integer, ForeignKey('users.id'))
)
def test_one(self):
admins, users, roles, user_roles = (self.tables.admins,
self.tables.users,
self.tables.roles,
self.tables.user_roles)
class User(object):pass
class Role(object):pass
class Admin(User):pass
role_mapper = mapper(Role, roles)
user_mapper = mapper(User, users, properties = {
'roles' : relationship(Role, secondary=user_roles, lazy='joined')
}
)
admin_mapper = mapper(Admin, admins, inherits=user_mapper)
sess = create_session()
adminrole = Role()
sess.add(adminrole)
sess.flush()
# create an Admin, and append a Role. the dependency processors
# corresponding to the "roles" attribute for the Admin mapper and the User mapper
# have to ensure that two dependency processors don't fire off and insert the
# many to many row twice.
a = Admin()
a.roles.append(adminrole)
a.password = 'admin'
sess.add(a)
sess.flush()
assert user_roles.count().scalar() == 1
def test_two(self):
admins, users, roles, user_roles = (self.tables.admins,
self.tables.users,
self.tables.roles,
self.tables.user_roles)
class User(object):
def __init__(self, email=None, password=None):
self.email = email
self.password = password
class Role(object):
def __init__(self, description=None):
self.description = description
class Admin(User):pass
role_mapper = mapper(Role, roles)
user_mapper = mapper(User, users, properties = {
'roles' : relationship(Role, secondary=user_roles, lazy='joined')
}
)
admin_mapper = mapper(Admin, admins, inherits=user_mapper)
# create roles
adminrole = Role('admin')
sess = create_session()
sess.add(adminrole)
sess.flush()
# create admin user
a = Admin(email='tim', password='admin')
a.roles.append(adminrole)
sess.add(a)
sess.flush()
a.password = 'sadmin'
sess.flush()
assert user_roles.count().scalar() == 1
class OptimizedGetOnDeferredTest(fixtures.MappedTest):
"""test that the 'optimized get' path accommodates deferred columns."""
@classmethod
def define_tables(cls, metadata):
Table(
"a", metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
)
Table(
"b", metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True),
Column('data', String(10))
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(A):
pass
@classmethod
def setup_mappers(cls):
A, B = cls.classes("A", "B")
a, b = cls.tables("a", "b")
mapper(A, a)
mapper(B, b, inherits=A, properties={
'data': deferred(b.c.data),
'expr': column_property(b.c.data + 'q', deferred=True)
})
def test_column_property(self):
A, B = self.classes("A", "B")
sess = Session()
b1 = B(data='x')
sess.add(b1)
sess.flush()
eq_(b1.expr, 'xq')
def test_expired_column(self):
A, B = self.classes("A", "B")
sess = Session()
b1 = B(data='x')
sess.add(b1)
sess.flush()
sess.expire(b1, ['data'])
eq_(b1.data, 'x')
class JoinedNoFKSortingTest(fixtures.MappedTest):
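"""test INSERT ordering for joined inheritance where the subclass tables
have no ForeignKey to the base and rely on explicit inherit_condition /
inherit_foreign_keys."""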
@classmethod
def define_tables(cls, metadata):
Table("a", metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
)
Table("b", metadata,
Column('id', Integer, primary_key=True)
)
Table("c", metadata,
Column('id', Integer, primary_key=True)
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(A):
pass
class C(A):
pass
@classmethod
def setup_mappers(cls):
A, B, C = cls.classes.A, cls.classes.B, cls.classes.C
mapper(A, cls.tables.a)
mapper(B, cls.tables.b, inherits=A,
inherit_condition=cls.tables.a.c.id == cls.tables.b.c.id,
inherit_foreign_keys=cls.tables.b.c.id)
mapper(C, cls.tables.c, inherits=A,
inherit_condition=cls.tables.a.c.id == cls.tables.c.c.id,
inherit_foreign_keys=cls.tables.c.c.id)
def test_ordering(self):
B, C = self.classes.B, self.classes.C
sess = Session()
sess.add_all([B(), C(), B(), C()])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO a () VALUES ()",
{}
),
CompiledSQL(
"INSERT INTO a () VALUES ()",
{}
),
CompiledSQL(
"INSERT INTO a () VALUES ()",
{}
),
CompiledSQL(
"INSERT INTO a () VALUES ()",
{}
),
AllOf(
CompiledSQL(
"INSERT INTO b (id) VALUES (:id)",
[{"id": 1}, {"id": 3}]
),
CompiledSQL(
"INSERT INTO c (id) VALUES (:id)",
[{"id": 2}, {"id": 4}]
)
)
)
class VersioningTest(fixtures.MappedTest):
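"""test version_id_col behavior with joined-table inheritance; concurrent
UPDATEs and DELETEs should raise StaleDataError when the dialect supports
sane rowcounts."""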
@classmethod
def define_tables(cls, metadata):
Table('base', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('version_id', Integer, nullable=False),
Column('value', String(40)),
Column('discriminator', Integer, nullable=False)
)
Table('subtable', metadata,
Column('id', None, ForeignKey('base.id'), primary_key=True),
Column('subdata', String(50))
)
Table('stuff', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('parent', Integer, ForeignKey('base.id'))
)
@testing.emits_warning(r".*updated rowcount")
@engines.close_open_connections
def test_save_update(self):
subtable, base, stuff = (self.tables.subtable,
self.tables.base,
self.tables.stuff)
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
class Stuff(Base):
pass
mapper(Stuff, stuff)
mapper(Base, base,
polymorphic_on=base.c.discriminator,
version_id_col=base.c.version_id,
polymorphic_identity=1, properties={
'stuff':relationship(Stuff)
})
mapper(Sub, subtable, inherits=Base, polymorphic_identity=2)
sess = create_session()
b1 = Base(value='b1')
s1 = Sub(value='sub1', subdata='some subdata')
sess.add(b1)
sess.add(s1)
sess.flush()
sess2 = create_session()
s2 = sess2.query(Base).get(s1.id)
s2.subdata = 'sess2 subdata'
s1.subdata = 'sess1 subdata'
sess.flush()
assert_raises(orm_exc.StaleDataError,
sess2.query(Base).with_lockmode('read').get,
s1.id)
if not testing.db.dialect.supports_sane_rowcount:
sess2.flush()
else:
assert_raises(orm_exc.StaleDataError, sess2.flush)
sess2.refresh(s2)
if testing.db.dialect.supports_sane_rowcount:
assert s2.subdata == 'sess1 subdata'
s2.subdata = 'sess2 subdata'
sess2.flush()
@testing.emits_warning(r".*(update|delete)d rowcount")
def test_delete(self):
subtable, base = self.tables.subtable, self.tables.base
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
mapper(Base, base,
polymorphic_on=base.c.discriminator,
version_id_col=base.c.version_id, polymorphic_identity=1)
mapper(Sub, subtable, inherits=Base, polymorphic_identity=2)
sess = create_session()
b1 = Base(value='b1')
s1 = Sub(value='sub1', subdata='some subdata')
s2 = Sub(value='sub2', subdata='some other subdata')
sess.add(b1)
sess.add(s1)
sess.add(s2)
sess.flush()
sess2 = create_session()
s3 = sess2.query(Base).get(s1.id)
sess2.delete(s3)
sess2.flush()
s2.subdata = 'some new subdata'
sess.flush()
s1.subdata = 'some new subdata'
if testing.db.dialect.supports_sane_rowcount:
assert_raises(
orm_exc.StaleDataError,
sess.flush
)
else:
sess.flush()
class DistinctPKTest(fixtures.MappedTest):
"""test the construction of mapper.primary_key when an inheriting relationship
joins on a column other than the primary key column."""
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
global person_table, employee_table, Person, Employee
person_table = Table("persons", metadata,
Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
Column("name", String(80)),
)
employee_table = Table("employees", metadata,
Column("eid", Integer, primary_key=True, test_needs_autoincrement=True),
Column("salary", Integer),
Column("person_id", Integer, ForeignKey("persons.id")),
)
class Person(object):
def __init__(self, name):
self.name = name
class Employee(Person): pass
@classmethod
def insert_data(cls):
person_insert = person_table.insert()
person_insert.execute(id=1, name='alice')
person_insert.execute(id=2, name='bob')
employee_insert = employee_table.insert()
employee_insert.execute(id=2, salary=250, person_id=1) # alice
employee_insert.execute(id=3, salary=200, person_id=2) # bob
def test_implicit(self):
person_mapper = mapper(Person, person_table)
mapper(Employee, employee_table, inherits=person_mapper)
assert list(class_mapper(Employee).primary_key) == [person_table.c.id]
def test_explicit_props(self):
person_mapper = mapper(Person, person_table)
mapper(Employee, employee_table, inherits=person_mapper,
properties={'pid':person_table.c.id,
'eid':employee_table.c.eid})
self._do_test(False)
def test_explicit_composite_pk(self):
person_mapper = mapper(Person, person_table)
mapper(Employee, employee_table,
inherits=person_mapper,
properties=dict(id=[employee_table.c.eid, person_table.c.id]),
primary_key=[person_table.c.id, employee_table.c.eid])
assert_raises_message(sa_exc.SAWarning,
r"On mapper Mapper\|Employee\|employees, "
"primary key column 'persons.id' is being "
"combined with distinct primary key column 'employees.eid' "
"in attribute 'id'. Use explicit properties to give "
"each column its own mapped attribute name.",
self._do_test, True
)
def test_explicit_pk(self):
person_mapper = mapper(Person, person_table)
mapper(Employee, employee_table, inherits=person_mapper, primary_key=[person_table.c.id])
self._do_test(False)
def _do_test(self, composite):
session = create_session()
query = session.query(Employee)
if composite:
alice1 = query.get([1,2])
bob = query.get([2,3])
alice2 = query.get([1,2])
else:
alice1 = query.get(1)
bob = query.get(2)
alice2 = query.get(1)
assert alice1.name == alice2.name == 'alice'
assert bob.name == 'bob'
class SyncCompileTest(fixtures.MappedTest):
"""test that syncrules compile properly on custom inherit conds"""
@classmethod
def define_tables(cls, metadata):
global _a_table, _b_table, _c_table
_a_table = Table('a', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data1', String(128))
)
_b_table = Table('b', metadata,
Column('a_id', Integer, ForeignKey('a.id'), primary_key=True),
Column('data2', String(128))
)
_c_table = Table('c', metadata,
# Column('a_id', Integer, ForeignKey('b.a_id'), primary_key=True), #works
Column('b_a_id', Integer, ForeignKey('b.a_id'), primary_key=True),
Column('data3', String(128))
)
def test_joins(self):
for j1 in (None, _b_table.c.a_id==_a_table.c.id, _a_table.c.id==_b_table.c.a_id):
for j2 in (None, _b_table.c.a_id==_c_table.c.b_a_id,
_c_table.c.b_a_id==_b_table.c.a_id):
self._do_test(j1, j2)
for t in reversed(_a_table.metadata.sorted_tables):
t.delete().execute().close()
def _do_test(self, j1, j2):
class A(object):
def __init__(self, **kwargs):
for key, value in list(kwargs.items()):
setattr(self, key, value)
class B(A):
pass
class C(B):
pass
mapper(A, _a_table)
mapper(B, _b_table, inherits=A,
inherit_condition=j1
)
mapper(C, _c_table, inherits=B,
inherit_condition=j2
)
session = create_session()
a = A(data1='a1')
session.add(a)
b = B(data1='b1', data2='b2')
session.add(b)
c = C(data1='c1', data2='c2', data3='c3')
session.add(c)
session.flush()
session.expunge_all()
assert len(session.query(A).all()) == 3
assert len(session.query(B).all()) == 2
assert len(session.query(C).all()) == 1
class OverrideColKeyTest(fixtures.MappedTest):
"""test overriding of column attributes."""
@classmethod
def define_tables(cls, metadata):
global base, subtable, subtable_two
base = Table('base', metadata,
Column('base_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(255)),
Column('sqlite_fixer', String(10))
)
subtable = Table('subtable', metadata,
Column('base_id', Integer, ForeignKey('base.base_id'), primary_key=True),
Column('subdata', String(255))
)
subtable_two = Table('subtable_two', metadata,
Column('base_id', Integer, primary_key=True),
Column('fk_base_id', Integer, ForeignKey('base.base_id')),
Column('subdata', String(255))
)
def test_plain(self):
# control case
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, base)
mapper(Sub, subtable, inherits=Base)
# Sub gets a "base_id" property using the "base_id"
# column of both tables.
eq_(
class_mapper(Sub).get_property('base_id').columns,
[subtable.c.base_id, base.c.base_id]
)
def test_override_explicit(self):
# this pattern is what you see when using declarative
# in particular, here we do a "manual" version of
# what we'd like the mapper to do.
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, base, properties={
'id':base.c.base_id
})
mapper(Sub, subtable, inherits=Base, properties={
# this is the manual way to do it, is not really
# possible in declarative
'id':[base.c.base_id, subtable.c.base_id]
})
eq_(
class_mapper(Sub).get_property('id').columns,
[base.c.base_id, subtable.c.base_id]
)
s1 = Sub()
s1.id = 10
sess = create_session()
sess.add(s1)
sess.flush()
assert sess.query(Sub).get(10) is s1
def test_override_onlyinparent(self):
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, base, properties={
'id':base.c.base_id
})
mapper(Sub, subtable, inherits=Base)
eq_(
class_mapper(Sub).get_property('id').columns,
[base.c.base_id]
)
eq_(
class_mapper(Sub).get_property('base_id').columns,
[subtable.c.base_id]
)
s1 = Sub()
s1.id = 10
s2 = Sub()
s2.base_id = 15
sess = create_session()
sess.add_all([s1, s2])
sess.flush()
# s1 gets '10'
assert sess.query(Sub).get(10) is s1
# s2 gets a new id, base_id is overwritten by the ultimate
# PK col
assert s2.id == s2.base_id != 15
def test_override_implicit(self):
# this is originally [ticket:1111].
# the pattern here is now disallowed by [ticket:1892]
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, base, properties={
'id':base.c.base_id
})
def go():
mapper(Sub, subtable, inherits=Base, properties={
'id':subtable.c.base_id
})
# Sub mapper compilation needs to detect that "base.c.base_id"
# is renamed in the inherited mapper as "id", even though
# it has its own "id" property. It then generates
# an exception in 0.7 due to the implicit conflict.
assert_raises(sa_exc.InvalidRequestError, go)
def test_pk_fk_different(self):
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, base)
def go():
mapper(Sub, subtable_two, inherits=Base)
assert_raises_message(
sa_exc.SAWarning,
"Implicitly combining column base.base_id with "
"column subtable_two.base_id under attribute 'base_id'",
go
)
def test_plain_descriptor(self):
"""test that descriptors prevent inheritance from propigating properties to subclasses."""
class Base(object):
pass
class Sub(Base):
@property
def data(self):
return "im the data"
mapper(Base, base)
mapper(Sub, subtable, inherits=Base)
s1 = Sub()
sess = create_session()
sess.add(s1)
sess.flush()
assert sess.query(Sub).one().data == "im the data"
def test_custom_descriptor(self):
"""test that descriptors prevent inheritance from propigating properties to subclasses."""
class MyDesc(object):
def __get__(self, instance, owner):
if instance is None:
return self
return "im the data"
class Base(object):
pass
class Sub(Base):
data = MyDesc()
mapper(Base, base)
mapper(Sub, subtable, inherits=Base)
s1 = Sub()
sess = create_session()
sess.add(s1)
sess.flush()
assert sess.query(Sub).one().data == "im the data"
def test_sub_columns_over_base_descriptors(self):
class Base(object):
@property
def subdata(self):
return "this is base"
class Sub(Base):
pass
mapper(Base, base)
mapper(Sub, subtable, inherits=Base)
sess = create_session()
b1 = Base()
assert b1.subdata == "this is base"
s1 = Sub()
s1.subdata = "this is sub"
assert s1.subdata == "this is sub"
sess.add_all([s1, b1])
sess.flush()
sess.expunge_all()
assert sess.query(Base).get(b1.base_id).subdata == "this is base"
assert sess.query(Sub).get(s1.base_id).subdata == "this is sub"
def test_base_descriptors_over_base_cols(self):
class Base(object):
@property
def data(self):
return "this is base"
class Sub(Base):
pass
mapper(Base, base)
mapper(Sub, subtable, inherits=Base)
sess = create_session()
b1 = Base()
assert b1.data == "this is base"
s1 = Sub()
assert s1.data == "this is base"
sess.add_all([s1, b1])
sess.flush()
sess.expunge_all()
assert sess.query(Base).get(b1.base_id).data == "this is base"
assert sess.query(Sub).get(s1.base_id).data == "this is base"
class OptimizedLoadTest(fixtures.MappedTest):
"""tests for the "optimized load" routine."""
@classmethod
def define_tables(cls, metadata):
Table('base', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(50)),
Column('type', String(50)),
Column('counter', Integer, server_default="1")
)
Table('sub', metadata,
Column('id', Integer, ForeignKey('base.id'), primary_key=True),
Column('sub', String(50)),
Column('subcounter', Integer, server_default="1"),
Column('subcounter2', Integer, server_default="1")
)
Table('subsub', metadata,
Column('id', Integer, ForeignKey('sub.id'), primary_key=True),
Column('subsubcounter2', Integer, server_default="1")
)
Table('with_comp', metadata,
Column('id', Integer, ForeignKey('base.id'), primary_key=True),
Column('a', String(10)),
Column('b', String(10))
)
def test_no_optimize_on_map_to_join(self):
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.ComparableEntity):
pass
class JoinBase(fixtures.ComparableEntity):
pass
class SubJoinBase(JoinBase):
pass
mapper(Base, base)
mapper(JoinBase, base.outerjoin(sub), properties=util.OrderedDict(
[('id', [base.c.id, sub.c.id]),
('counter', [base.c.counter, sub.c.subcounter])])
)
mapper(SubJoinBase, inherits=JoinBase)
sess = Session()
sess.add(Base(data='data'))
sess.commit()
sjb = sess.query(SubJoinBase).one()
sjb_id = sjb.id
sess.expire(sjb)
# this should not use the optimized load,
# which assumes discrete tables
def go():
eq_(sjb.data, 'data')
self.assert_sql_execution(
testing.db,
go,
CompiledSQL(
"SELECT base.id AS base_id, sub.id AS sub_id, "
"base.counter AS base_counter, sub.subcounter AS sub_subcounter, "
"base.data AS base_data, base.type AS base_type, "
"sub.sub AS sub_sub, sub.subcounter2 AS sub_subcounter2 "
"FROM base LEFT OUTER JOIN sub ON base.id = sub.id "
"WHERE base.id = :param_1",
{'param_1': sjb_id}
),
)
def test_optimized_passes(self):
""""test that the 'optimized load' routine doesn't crash when
a column in the join condition is not available."""
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.ComparableEntity):
pass
class Sub(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
# redefine Sub's "id" to favor the "id" col in the subtable.
# "id" is also part of the primary join condition
mapper(Sub, sub, inherits=Base,
polymorphic_identity='sub',
properties={'id':[sub.c.id, base.c.id]})
sess = sessionmaker()()
s1 = Sub(data='s1data', sub='s1sub')
sess.add(s1)
sess.commit()
sess.expunge_all()
# load s1 via Base. s1.id won't populate since it's relative to
# the "sub" table. The optimized load kicks in and tries to
# generate on the primary join, but cannot since "id" is itself unloaded.
# the optimized load needs to return "None" so regular full-row loading proceeds
s1 = sess.query(Base).first()
assert s1.sub == 's1sub'
def test_column_expression(self):
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.ComparableEntity):
pass
class Sub(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={
'concat': column_property(sub.c.sub + "|" + sub.c.sub)
})
sess = sessionmaker()()
s1 = Sub(data='s1data', sub='s1sub')
sess.add(s1)
sess.commit()
sess.expunge_all()
s1 = sess.query(Base).first()
assert s1.concat == 's1sub|s1sub'
def test_column_expression_joined(self):
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.ComparableEntity):
pass
class Sub(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={
'concat': column_property(base.c.data + "|" + sub.c.sub)
})
sess = sessionmaker()()
s1 = Sub(data='s1data', sub='s1sub')
s2 = Sub(data='s2data', sub='s2sub')
s3 = Sub(data='s3data', sub='s3sub')
sess.add_all([s1, s2, s3])
sess.commit()
sess.expunge_all()
# query a bunch of rows to ensure there's no cartesian
# product against "base" occurring, it is in fact
# detecting that "base" needs to be in the join
# criterion
eq_(
sess.query(Base).order_by(Base.id).all(),
[
Sub(data='s1data', sub='s1sub', concat='s1data|s1sub'),
Sub(data='s2data', sub='s2sub', concat='s2data|s2sub'),
Sub(data='s3data', sub='s3sub', concat='s3data|s3sub')
]
)
def test_composite_column_joined(self):
base, with_comp = self.tables.base, self.tables.with_comp
class Base(fixtures.BasicEntity):
pass
class WithComp(Base):
pass
class Comp(object):
def __init__(self, a, b):
self.a = a
self.b = b
def __composite_values__(self):
return self.a, self.b
def __eq__(self, other):
return (self.a == other.a) and (self.b == other.b)
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
mapper(WithComp, with_comp, inherits=Base, polymorphic_identity='wc', properties={
'comp': composite(Comp, with_comp.c.a, with_comp.c.b)
})
sess = sessionmaker()()
s1 = WithComp(data='s1data', comp=Comp('ham', 'cheese'))
s2 = WithComp(data='s2data', comp=Comp('bacon', 'eggs'))
sess.add_all([s1, s2])
sess.commit()
sess.expunge_all()
s1test, s2test = sess.query(Base).order_by(Base.id).all()
assert s1test.comp
assert s2test.comp
eq_(s1test.comp, Comp('ham', 'cheese'))
eq_(s2test.comp, Comp('bacon', 'eggs'))
def test_load_expired_on_pending(self):
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
mapper(Sub, sub, inherits=Base, polymorphic_identity='sub')
sess = Session()
s1 = Sub(data='s1')
sess.add(s1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO base (data, type) VALUES (:data, :type)",
[{'data':'s1','type':'sub'}]
),
CompiledSQL(
"INSERT INTO sub (id, sub) VALUES (:id, :sub)",
lambda ctx:{'id':s1.id, 'sub':None}
),
)
def go():
eq_( s1.subcounter2, 1 )
self.assert_sql_execution(
testing.db,
go,
CompiledSQL(
"SELECT base.counter AS base_counter, sub.subcounter AS sub_subcounter, "
"sub.subcounter2 AS sub_subcounter2 FROM base JOIN sub "
"ON base.id = sub.id WHERE base.id = :param_1",
lambda ctx:{'param_1': s1.id}
),
)
def test_dont_generate_on_none(self):
base, sub = self.tables.base, self.tables.sub
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
mapper(Base, base, polymorphic_on=base.c.type,
polymorphic_identity='base')
m = mapper(Sub, sub, inherits=Base, polymorphic_identity='sub')
s1 = Sub()
assert m._optimized_get_statement(attributes.instance_state(s1),
['subcounter2']) is None
# loads s1.id as None
eq_(s1.id, None)
# this now will come up with a value of None for id - should reject
assert m._optimized_get_statement(attributes.instance_state(s1),
['subcounter2']) is None
s1.id = 1
attributes.instance_state(s1)._commit_all(s1.__dict__, None)
assert m._optimized_get_statement(attributes.instance_state(s1),
['subcounter2']) is not None
def test_load_expired_on_pending_twolevel(self):
base, sub, subsub = (self.tables.base,
self.tables.sub,
self.tables.subsub)
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
class SubSub(Sub):
pass
mapper(Base, base, polymorphic_on=base.c.type,
polymorphic_identity='base')
mapper(Sub, sub, inherits=Base, polymorphic_identity='sub')
mapper(SubSub, subsub, inherits=Sub, polymorphic_identity='subsub')
sess = Session()
s1 = SubSub(data='s1', counter=1, subcounter=2)
sess.add(s1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO base (data, type, counter) VALUES "
"(:data, :type, :counter)",
[{'data':'s1','type':'subsub','counter':1}]
),
CompiledSQL(
"INSERT INTO sub (id, sub, subcounter) VALUES "
"(:id, :sub, :subcounter)",
lambda ctx:[{'subcounter': 2, 'sub': None, 'id': s1.id}]
),
CompiledSQL(
"INSERT INTO subsub (id) VALUES (:id)",
lambda ctx:{'id':s1.id}
),
)
def go():
eq_(
s1.subcounter2, 1
)
self.assert_sql_execution(
testing.db,
go,
Or(
CompiledSQL(
"SELECT subsub.subsubcounter2 AS subsub_subsubcounter2, "
"sub.subcounter2 AS sub_subcounter2 FROM subsub, sub "
"WHERE :param_1 = sub.id AND sub.id = subsub.id",
lambda ctx: {'param_1': s1.id}
),
CompiledSQL(
"SELECT sub.subcounter2 AS sub_subcounter2, "
"subsub.subsubcounter2 AS subsub_subsubcounter2 "
"FROM sub, subsub "
"WHERE :param_1 = sub.id AND sub.id = subsub.id",
lambda ctx: {'param_1': s1.id}
),
)
)
class TransientInheritingGCTest(fixtures.TestBase):
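"""test that a subclass mapped at runtime can be garbage collected once
dereferenced (single-table inheritance only; joined inheritance is not yet
supported)."""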
__requires__ = ('cpython',)
def _fixture(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
data = Column(String(10))
self.A = A
return Base
def setUp(self):
self.Base = self._fixture()
def tearDown(self):
self.Base.metadata.drop_all(testing.db)
#clear_mappers()
self.Base = None
def _do_test(self, go):
B = go()
self.Base.metadata.create_all(testing.db)
sess = Session(testing.db)
sess.add(B(data='some b'))
sess.commit()
b1 = sess.query(B).one()
assert isinstance(b1, B)
sess.close()
del sess
del b1
del B
gc_collect()
eq_(
len(self.A.__subclasses__()),
0)
def test_single(self):
def go():
class B(self.A):
pass
return B
self._do_test(go)
@testing.fails_if(lambda: True,
"not supported for joined inh right now.")
def test_joined(self):
def go():
class B(self.A):
__tablename__ = 'b'
id = Column(Integer, ForeignKey('a.id'),
primary_key=True)
return B
self._do_test(go)
class NoPKOnSubTableWarningTest(fixtures.TestBase):
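"""test the warning raised when an inheriting table defines no primary key,
and that an explicit primary_key argument avoids it."""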
def _fixture(self):
metadata = MetaData()
parent = Table('parent', metadata,
Column('id', Integer, primary_key=True)
)
child = Table('child', metadata,
Column('id', Integer, ForeignKey('parent.id'))
)
return parent, child
def tearDown(self):
clear_mappers()
def test_warning_on_sub(self):
parent, child = self._fixture()
class P(object):
pass
class C(P):
pass
mapper(P, parent)
assert_raises_message(
sa_exc.SAWarning,
"Could not assemble any primary keys for locally mapped "
"table 'child' - no rows will be persisted in this Table.",
mapper, C, child, inherits=P
)
def test_no_warning_with_explicit(self):
parent, child = self._fixture()
class P(object):
pass
class C(P):
pass
mapper(P, parent)
mc = mapper(C, child, inherits=P, primary_key=[parent.c.id])
eq_(mc.primary_key, (parent.c.id,))
class InhCondTest(fixtures.TestBase):
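"""test derivation of the implicit inherit_condition from foreign keys,
including cases where unrelated FKs reference tables or columns that
cannot be resolved."""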
def test_inh_cond_nonexistent_table_unrelated(self):
metadata = MetaData()
base_table = Table("base", metadata,
Column("id", Integer, primary_key=True)
)
derived_table = Table("derived", metadata,
Column("id", Integer, ForeignKey("base.id"), primary_key=True),
Column("owner_id", Integer, ForeignKey("owner.owner_id"))
)
class Base(object):
pass
class Derived(Base):
pass
mapper(Base, base_table)
# succeeds, despite "owner" table not configured yet
m2 = mapper(Derived, derived_table,
inherits=Base)
assert m2.inherit_condition.compare(
base_table.c.id==derived_table.c.id
)
def test_inh_cond_nonexistent_col_unrelated(self):
m = MetaData()
base_table = Table("base", m,
Column("id", Integer, primary_key=True)
)
derived_table = Table("derived", m,
Column("id", Integer, ForeignKey('base.id'),
primary_key=True),
Column('order_id', Integer, ForeignKey('order.foo'))
)
order_table = Table('order', m, Column('id', Integer, primary_key=True))
class Base(object):
pass
class Derived(Base):
pass
mapper(Base, base_table)
# succeeds, despite "order.foo" doesn't exist
m2 = mapper(Derived, derived_table, inherits=Base)
assert m2.inherit_condition.compare(
base_table.c.id==derived_table.c.id
)
def test_inh_cond_no_fk(self):
metadata = MetaData()
base_table = Table("base", metadata,
Column("id", Integer, primary_key=True)
)
derived_table = Table("derived", metadata,
Column("id", Integer, primary_key=True),
)
class Base(object):
pass
class Derived(Base):
pass
mapper(Base, base_table)
assert_raises_message(
sa_exc.ArgumentError,
"Can't find any foreign key relationships between "
"'base' and 'derived'.",
mapper,
Derived, derived_table, inherits=Base
)
def test_inh_cond_nonexistent_table_related(self):
m1 = MetaData()
m2 = MetaData()
base_table = Table("base", m1,
Column("id", Integer, primary_key=True)
)
derived_table = Table("derived", m2,
Column("id", Integer, ForeignKey('base.id'),
primary_key=True),
)
class Base(object):
pass
class Derived(Base):
pass
mapper(Base, base_table)
# the ForeignKey def is correct but there are two
# different metadatas. Would like the traditional
# "noreferencedtable" error to raise so that the
# user is directed towards the FK definition in question.
assert_raises_message(
sa_exc.NoReferencedTableError,
"Foreign key associated with column 'derived.id' "
"could not find table 'base' with which to generate "
"a foreign key to target column 'id'",
mapper,
Derived, derived_table, inherits=Base
)
def test_inh_cond_nonexistent_col_related(self):
m = MetaData()
base_table = Table("base", m,
Column("id", Integer, primary_key=True)
)
derived_table = Table("derived", m,
Column("id", Integer, ForeignKey('base.q'),
primary_key=True),
)
class Base(object):
pass
class Derived(Base):
pass
mapper(Base, base_table)
assert_raises_message(
sa_exc.NoReferencedColumnError,
"Could not initialize target column for ForeignKey "
"'base.q' on table "
"'derived': table 'base' has no column named 'q'",
mapper,
Derived, derived_table, inherits=Base
)
class PKDiscriminatorTest(fixtures.MappedTest):
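"""test that a column which is part of the primary key can also act as the
polymorphic discriminator."""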
@classmethod
def define_tables(cls, metadata):
parents = Table('parents', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(60)))
children = Table('children', metadata,
Column('id', Integer, ForeignKey('parents.id'),
primary_key=True),
Column('type', Integer,primary_key=True),
Column('name', String(60)))
def test_pk_as_discriminator(self):
parents, children = self.tables.parents, self.tables.children
class Parent(object):
def __init__(self, name=None):
self.name = name
class Child(object):
def __init__(self, name=None):
self.name = name
class A(Child):
pass
mapper(Parent, parents, properties={
'children': relationship(Child, backref='parent'),
})
mapper(Child, children, polymorphic_on=children.c.type,
polymorphic_identity=1)
mapper(A, inherits=Child, polymorphic_identity=2)
s = create_session()
p = Parent('p1')
a = A('a1')
p.children.append(a)
s.add(p)
s.flush()
assert a.id
assert a.type == 2
p.name='p1new'
a.name='a1new'
s.flush()
s.expire_all()
assert a.name=='a1new'
assert p.name=='p1new'
class NoPolyIdentInMiddleTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('base', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(50), nullable=False),
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(A):
pass
class C(B):
pass
class D(B):
pass
class E(A):
pass
@classmethod
def setup_mappers(cls):
A, C, B, E, D, base = (cls.classes.A,
cls.classes.C,
cls.classes.B,
cls.classes.E,
cls.classes.D,
cls.tables.base)
mapper(A, base, polymorphic_on=base.c.type)
mapper(B, inherits=A, )
mapper(C, inherits=B, polymorphic_identity='c')
mapper(D, inherits=B, polymorphic_identity='d')
mapper(E, inherits=A, polymorphic_identity='e')
def test_load_from_middle(self):
C, B = self.classes.C, self.classes.B
s = Session()
s.add(C())
o = s.query(B).first()
eq_(o.type, 'c')
assert isinstance(o, C)
def test_load_from_base(self):
A, C = self.classes.A, self.classes.C
s = Session()
s.add(C())
o = s.query(A).first()
eq_(o.type, 'c')
assert isinstance(o, C)
def test_discriminator(self):
C, B, base = (self.classes.C,
self.classes.B,
self.tables.base)
assert class_mapper(B).polymorphic_on is base.c.type
assert class_mapper(C).polymorphic_on is base.c.type
def test_load_multiple_from_middle(self):
C, B, E, D, base = (self.classes.C,
self.classes.B,
self.classes.E,
self.classes.D,
self.tables.base)
s = Session()
s.add_all([C(), D(), E()])
eq_(
s.query(B).order_by(base.c.type).all(),
[C(), D()]
)
class DeleteOrphanTest(fixtures.MappedTest):
"""Test the fairly obvious, that an error is raised
when attempting to insert an orphan.
Previous SQLA versions would check this constraint
in memory which is the original rationale for this test.
"""
@classmethod
def define_tables(cls, metadata):
global single, parent
single = Table('single', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('type', String(50), nullable=False),
Column('data', String(50)),
Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False),
)
parent = Table('parent', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(50))
)
def test_orphan_message(self):
class Base(fixtures.BasicEntity):
pass
class SubClass(Base):
pass
class Parent(fixtures.BasicEntity):
pass
mapper(Base, single, polymorphic_on=single.c.type, polymorphic_identity='base')
mapper(SubClass, inherits=Base, polymorphic_identity='sub')
mapper(Parent, parent, properties={
'related':relationship(Base, cascade="all, delete-orphan")
})
sess = create_session()
s1 = SubClass(data='s1')
sess.add(s1)
assert_raises(sa_exc.DBAPIError, sess.flush)
class PolymorphicUnionTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def _fixture(self):
t1 = table('t1', column('c1', Integer),
column('c2', Integer),
column('c3', Integer))
t2 = table('t2', column('c1', Integer), column('c2', Integer),
column('c3', Integer),
column('c4', Integer))
t3 = table('t3', column('c1', Integer),
column('c3', Integer),
column('c5', Integer))
return t1, t2, t3
def test_type_col_present(self):
t1, t2, t3 = self._fixture()
self.assert_compile(
polymorphic_union(
util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]),
'q1'
),
"SELECT t1.c1, t1.c2, t1.c3, CAST(NULL AS INTEGER) AS c4, "
"CAST(NULL AS INTEGER) AS c5, 'a' AS q1 FROM t1 UNION ALL "
"SELECT t2.c1, t2.c2, t2.c3, t2.c4, CAST(NULL AS INTEGER) AS c5, "
"'b' AS q1 FROM t2 UNION ALL SELECT t3.c1, "
"CAST(NULL AS INTEGER) AS c2, t3.c3, CAST(NULL AS INTEGER) AS c4, "
"t3.c5, 'c' AS q1 FROM t3"
)
def test_type_col_non_present(self):
t1, t2, t3 = self._fixture()
self.assert_compile(
polymorphic_union(
util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]),
None
),
"SELECT t1.c1, t1.c2, t1.c3, CAST(NULL AS INTEGER) AS c4, "
"CAST(NULL AS INTEGER) AS c5 FROM t1 UNION ALL SELECT t2.c1, "
"t2.c2, t2.c3, t2.c4, CAST(NULL AS INTEGER) AS c5 FROM t2 "
"UNION ALL SELECT t3.c1, CAST(NULL AS INTEGER) AS c2, t3.c3, "
"CAST(NULL AS INTEGER) AS c4, t3.c5 FROM t3"
)
def test_no_cast_null(self):
t1, t2, t3 = self._fixture()
self.assert_compile(
polymorphic_union(
util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]),
'q1', cast_nulls=False
),
"SELECT t1.c1, t1.c2, t1.c3, NULL AS c4, NULL AS c5, 'a' AS q1 "
"FROM t1 UNION ALL SELECT t2.c1, t2.c2, t2.c3, t2.c4, NULL AS c5, "
"'b' AS q1 FROM t2 UNION ALL SELECT t3.c1, NULL AS c2, t3.c3, "
"NULL AS c4, t3.c5, 'c' AS q1 FROM t3"
)
class NameConflictTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
content = Table('content', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(30))
)
foo = Table('foo', metadata,
Column('id', Integer, ForeignKey('content.id'),
primary_key=True),
Column('content_type', String(30))
)
def test_name_conflict(self):
class Content(object):
pass
class Foo(Content):
pass
mapper(Content, self.tables.content,
polymorphic_on=self.tables.content.c.type)
mapper(Foo, self.tables.foo, inherits=Content,
polymorphic_identity='foo')
sess = create_session()
f = Foo()
f.content_type = 'bar'
sess.add(f)
sess.flush()
f_id = f.id
sess.expunge_all()
assert sess.query(Content).get(f_id).content_type == 'bar'
| mit | -7,046,855,701,815,503,000 | 32.051519 | 108 | 0.523782 | false |
albertomurillo/ansible | test/units/modules/network/netact/test_netact_cm_command.py | 45 | 6140 | """
netact_cm_command unit tests
"""
# -*- coding: utf-8 -*-
# (c) 2017, Nokia
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=invalid-name,protected-access,function-redefined,unused-argument
# pylint: disable=unused-import,redundant-unittest-assert
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat import unittest
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
from ansible.modules.network.netact import netact_cm_command
from units.compat.mock import patch
from units.modules.utils import set_module_args, AnsibleExitJson, AnsibleFailJson, ModuleTestCase
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs):
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
def get_bin_path(self, arg, required=False):
"""Mock AnsibleModule.get_bin_path"""
if arg.endswith('netact_cm_command'):
return '/usr/bin/my_command'
else:
if required:
fail_json(msg='%r not found !' % arg)
class TestClass(unittest.TestCase):
"""
Test cases
"""
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json,
get_bin_path=get_bin_path)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
def test_module_fail_when_required_args_missing(self):
"""
        Testing that the command fails if required args are missing
:return:
"""
with self.assertRaises(AnsibleFailJson):
set_module_args({})
netact_cm_command.main()
self.assertTrue(False)
def test_ensure_command_called(self):
"""
        Testing that the command is executed with the correct args
:return:
"""
set_module_args({
'operation': "Upload",
'opsName': 'Uploading_testi',
'DN': "PLMN-PLMN/MRBTS-746",
})
with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command:
stdout = 'configuration updated'
stderr = ''
return_code = 0
mock_run_command.return_value = return_code, stdout, stderr # successful execution
with self.assertRaises(AnsibleExitJson) as result:
netact_cm_command.main()
print(result.exception.args)
self.assertTrue(result.exception.args[0]['changed']) # ensure result is changed
mock_run_command.assert_called_once_with(
['/opt/oss/bin/racclimx.sh', '-op', 'Upload', '-opsName', 'Uploading_testi',
'-DN', 'PLMN-PLMN/MRBTS-746'],
check_rc=True)
def test_ensure_backupPlanName_outputs_correctly(self):
"""
        Testing that createBackupPlan and backupPlanName are passed to the command correctly
:return:
"""
set_module_args({
'operation': "Provision",
'opsName': 'Provision_test',
'WS': "PLMN-PLMN/MRBTS-746",
'createBackupPlan': "Yes",
'backupPlanName': "backupPlanName"
})
with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command:
stdout = 'configuration updated'
stderr = ''
return_code = 0
mock_run_command.return_value = return_code, stdout, stderr # successful execution
with self.assertRaises(AnsibleExitJson) as result:
netact_cm_command.main()
print(result.exception.args)
self.assertTrue(result.exception.args[0]['changed']) # ensure result is changed
mock_run_command.assert_called_once_with(
['/opt/oss/bin/racclimx.sh', '-op', 'Provision', '-opsName', 'Provision_test',
'-WS', 'PLMN-PLMN/MRBTS-746', '-createBackupPlan', 'true', '-backupPlanName', 'backupPlanName'],
check_rc=True)
def test_withwrongargs(self):
"""
Testing that wrong attribute causing error
:return:
"""
set_module_args({
'operation': "Upload",
'opsName': 'Uploading_testi',
'MR': "PLMN-PLMN/MRBTS-746",
'abc': 'abc'
})
with self.assertRaises(AnsibleFailJson):
with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command:
stdout = 'configuration updated'
stderr = ''
return_code = 0
mock_run_command.return_value = return_code, stdout, stderr # successful execution
with self.assertRaises(AnsibleExitJson) as result:
netact_cm_command.main()
self.assertTrue(result.exception.args[0]['changed']) # ensure result is changed
                self.assertFalse(True)  # fail the test if no exception was raised
| gpl-3.0 | -8,699,252,866,255,818,000 | 33.886364 | 109 | 0.613518 | false |
google-research/google-research | widget_caption/widget_caption_input.py | 1 | 12750 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Widget captioning input pipeline."""
from absl import flags
import tensorflow as tf
# Constants for embeddings.
PADDING = 0
EOS = 1
UKN = 2
START = 3
FLAGS = flags.FLAGS
def _produce_target_phrase(phrases):
"""Randomly selects one phrase as the target phrase for training."""
with tf.variable_scope('produce_output'):
# Find indices for valid phrases with meaningful tokens.
valid_phrase_indices = tf.reshape(
tf.where(tf.reduce_any(tf.greater(phrases, EOS), -1)), [-1])
    # If the indices are empty (no valid tokens/annotations), just use index 0;
    # otherwise, randomly shuffle the indices and select one.
index = tf.cond(
tf.greater(tf.shape(valid_phrase_indices)[0], 0),
lambda: tf.cast(tf.random.shuffle(valid_phrase_indices)[0], tf.int32),
lambda: 0)
phrase = phrases[index]
# Append EOS to the end of phrase.
phrase = tf.boolean_mask(phrase, mask=tf.greater(phrase, PADDING))
phrase = tf.concat([phrase, [EOS]], axis=0)
# Pad the phrase to length of 11 (10 words + EOS).
phrase = tf.pad(phrase, [[0, 11 - tf.shape(phrase)[-1]]])
return phrase
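# Illustrative example of the behaviour above (not part of the original
# module): with PADDING=0 and EOS=1, a selected phrase
# [5, 7, 9, 0, 0, 0, 0, 0, 0, 0] has its padding stripped, EOS appended, and is
# right-padded back to length 11, yielding [5, 7, 9, 1, 0, 0, 0, 0, 0, 0, 0].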
def _select_phrases(dense_features):
"""Selects phrases from the workers."""
with tf.variable_scope('select_phrases'):
# Sample one phrase for each node.
output_phrase = tf.map_fn(_produce_target_phrase,
dense_features['caption_token_id'])
# Output shape: [N, seq_len]
output_phrase = tf.reshape(output_phrase, [-1, 11])
return output_phrase
def _extract_image(dense_features, num_ui_objects, target_node=None):
"""Extracts image features."""
with tf.variable_scope('extract_image'):
visible = dense_features['visibility_seq'] * dense_features[
'visibility_to_user_seq']
obj_pixels = tf.reshape(dense_features['obj_img_mat'],
[num_ui_objects, 64, 64, 3])
if target_node is not None:
obj_pixels = tf.image.rgb_to_grayscale(tf.gather(obj_pixels, target_node))
else:
obj_pixels = tf.image.rgb_to_grayscale(obj_pixels)
w = (
dense_features['cord_x_seq'][:, 1] -
dense_features['cord_x_seq'][:, 0])
h = (
dense_features['cord_y_seq'][:, 1] -
dense_features['cord_y_seq'][:, 0])
obj_visible = tf.logical_and(
tf.equal(visible, 1),
tf.logical_or(tf.greater(w, 0.005), tf.greater(h, 0.005)))
obj_pixels = tf.where(obj_visible, obj_pixels, tf.zeros_like(obj_pixels))
return tf.cast(obj_pixels, tf.float32) / 255.0, obj_visible
def filter_empty_mturk():
"""Creates a filtering function."""
def _has_mturk_captions(dense_features):
"""Check whether it has nodes with MTurk captions."""
num_nodes = tf.shape(dense_features['label_flag'])[0]
token_ids = tf.reshape(dense_features['caption_token_id'],
[num_nodes, 4, 10])
nodes_with_annotations = tf.reduce_any(
tf.reduce_any(tf.greater(token_ids, EOS), -1), -1)
original_worker_node_mask = tf.equal(dense_features['label_flag'], 0)
worker_node_mask = tf.logical_and(original_worker_node_mask,
nodes_with_annotations)
return tf.reduce_any(worker_node_mask)
return _has_mturk_captions
def parse_tf_example(serialized_example):
"""Parses a single tf example."""
keys_to_features = {
'developer_token_id': tf.VarLenFeature(tf.int64),
'resource_token_id': tf.VarLenFeature(tf.int64),
'caption_token_id': tf.VarLenFeature(tf.int64),
'caption_phrase_id': tf.VarLenFeature(tf.int64),
'gold_caption': tf.VarLenFeature(tf.string),
'clickable_seq': tf.VarLenFeature(tf.int64),
'v_distance_seq': tf.VarLenFeature(tf.float32),
'h_distance_seq': tf.VarLenFeature(tf.float32),
'type_id_seq': tf.VarLenFeature(tf.int64),
'cord_x_seq': tf.VarLenFeature(tf.float32),
'cord_y_seq': tf.VarLenFeature(tf.float32),
'visibility_to_user_seq': tf.VarLenFeature(tf.int64),
'visibility_seq': tf.VarLenFeature(tf.int64),
'label_flag': tf.VarLenFeature(tf.int64), # 0: worker 1: developer
'parent_child_seq': tf.VarLenFeature(tf.int64),
'obj_img_mat': tf.VarLenFeature(tf.int64),
'obj_dom_pos': tf.VarLenFeature(tf.int64),
'is_leaf': tf.VarLenFeature(tf.int64),
}
parsed = tf.parse_single_example(serialized_example, keys_to_features)
dense_features = {}
for key in keys_to_features:
if key in ['gold_caption']:
default_value = ''
else:
default_value = 0
dense_features[key] = tf.sparse_tensor_to_dense(
parsed[key], default_value=default_value)
return dense_features
def create_parser(word_vocab_size,
phrase_vocab_size,
max_pixel_pos=100,
max_dom_pos=500,
is_inference=False):
"""Creates a parser for tf.Example."""
def process_tf_example(dense_features):
"""Parses a single tf example."""
# Reshape the features
num_ui_objects = tf.shape(dense_features['clickable_seq'])[0]
dense_features['caption_token_id'] = tf.reshape(
dense_features['caption_token_id'], [num_ui_objects, 4, 10])
dense_features['developer_token_id'] = tf.reshape(
dense_features['developer_token_id'], [num_ui_objects, 10])
dense_features['resource_token_id'] = tf.reshape(
dense_features['resource_token_id'], [num_ui_objects, 10])
dense_features['caption_token_id'] = tf.where(
tf.greater_equal(dense_features['caption_token_id'], word_vocab_size),
tf.cast(
tf.fill(tf.shape(dense_features['caption_token_id']), UKN),
dtype=tf.int64), dense_features['caption_token_id'])
dense_features['developer_token_id'] = tf.where(
tf.greater_equal(dense_features['developer_token_id'], word_vocab_size),
tf.cast(
tf.fill(tf.shape(dense_features['developer_token_id']), UKN),
dtype=tf.int64), dense_features['developer_token_id'])
dense_features['resource_token_id'] = tf.where(
tf.greater_equal(dense_features['resource_token_id'], word_vocab_size),
tf.cast(
tf.fill(tf.shape(dense_features['resource_token_id']), UKN),
dtype=tf.int64), dense_features['resource_token_id'])
dense_features['caption_phrase_id'] = tf.where(
tf.greater_equal(dense_features['caption_phrase_id'],
phrase_vocab_size),
tf.cast(
tf.fill(tf.shape(dense_features['caption_phrase_id']), UKN),
dtype=tf.int64), dense_features['caption_phrase_id'])
dense_features['v_distance_seq'] = tf.reshape(
dense_features['v_distance_seq'], [num_ui_objects, num_ui_objects],
name='v_distance_seq')
dense_features['h_distance_seq'] = tf.reshape(
dense_features['h_distance_seq'], [num_ui_objects, num_ui_objects],
name='h_distance_seq')
dense_features['cord_x_seq'] = tf.reshape(
dense_features['cord_x_seq'], [num_ui_objects, 2], name='cord_x_seq')
dense_features['cord_y_seq'] = tf.reshape(
dense_features['cord_y_seq'], [num_ui_objects, 2], name='cord_y_seq')
dense_features['parent_child_seq'] = tf.reshape(
tf.to_int32(dense_features['parent_child_seq']), [-1, num_ui_objects],
name='parent_child_seq')
dense_features['obj_dom_pos'] = tf.where(
tf.greater_equal(dense_features['obj_dom_pos'], max_dom_pos),
tf.cast(
tf.fill(tf.shape(dense_features['obj_dom_pos']), 0),
dtype=tf.int64), dense_features['obj_dom_pos'])
feature_dict = {}
if not is_inference:
output_phrase = _select_phrases(dense_features)
feature_dict['caption_token_id'] = output_phrase
feature_dict['caption_phrase_id'] = dense_features['caption_phrase_id']
feature_dict['developer_token_id'] = dense_features['developer_token_id']
feature_dict['resource_token_id'] = dense_features['resource_token_id']
feature_dict['reference'] = dense_features['gold_caption']
# feature_dict['obj_str_seq'] = dense_features['obj_str_seq']
feature_dict['label_flag'] = dense_features['label_flag']
feature_dict['obj_is_leaf'] = dense_features['is_leaf']
obj_pixels, obj_visible = _extract_image(dense_features, num_ui_objects)
feature_dict['obj_pixels'] = obj_pixels
feature_dict['obj_visible'] = obj_visible
feature_dict['obj_screen_pos'] = tf.concat(
[dense_features['cord_x_seq'], dense_features['cord_y_seq']], -1)
feature_dict['obj_screen_pos'] = tf.to_int32(
feature_dict['obj_screen_pos'] * (max_pixel_pos - 1))
feature_dict['obj_clickable'] = dense_features['clickable_seq']
feature_dict['obj_type'] = dense_features['type_id_seq']
feature_dict['obj_adjacency'] = dense_features['parent_child_seq']
feature_dict['obj_dom_pos'] = tf.reshape(dense_features['obj_dom_pos'],
[num_ui_objects, 3])
feature_dict['obj_is_padding'] = tf.zeros(tf.shape(num_ui_objects))
for key in [
'obj_adjacency',
'obj_type',
'obj_clickable',
'obj_screen_pos',
'obj_dom_pos',
'developer_token_id',
'resource_token_id',
]:
# Add the auxiliary step dimension.
feature_dict[key] = tf.expand_dims(feature_dict[key], 0)
for key in [
'caption_token_id',
'caption_phrase_id',
'developer_token_id',
'resource_token_id',
'label_flag',
'obj_adjacency',
'obj_type',
'obj_clickable',
'obj_visible',
'obj_is_leaf',
'icon_label',
'obj_dom_pos',
'obj_is_padding',
]:
if key in feature_dict:
feature_dict[key] = tf.cast(feature_dict[key], tf.int32)
return feature_dict
return process_tf_example
def input_fn(pattern,
batch_size,
word_vocab_size,
phrase_vocab_size,
max_pixel_pos=100,
max_dom_pos=500,
epoches=1,
buffer_size=1):
"""Retrieves batches of data for training."""
# files = tf.data.Dataset.list_files(pattern)
filepaths = tf.io.gfile.glob(pattern)
dataset = tf.data.TFRecordDataset([filepaths])
dataset = dataset.map(
parse_tf_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.filter(filter_empty_mturk())
dataset = dataset.map(
create_parser(word_vocab_size, phrase_vocab_size, max_pixel_pos,
max_dom_pos),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.shuffle(buffer_size=buffer_size)
dataset = dataset.repeat(count=epoches)
padding_value_int = tf.cast(0, tf.int32)
anchor_padding_value_int = tf.cast(-1, tf.int32)
padding_info = [
('caption_token_id', [None, 11], padding_value_int),
('caption_phrase_id', [None], padding_value_int),
('developer_token_id', [1, None, 10], padding_value_int),
('resource_token_id', [1, None, 10], padding_value_int),
('reference', [None], tf.cast('', tf.string)),
('label_flag', [None], anchor_padding_value_int),
('icon_label', [None], padding_value_int),
('icon_iou', [None], 0.0),
('obj_pixels', [None, 64, 64, 1], tf.cast(0, tf.float32)),
('obj_adjacency', [1, None, None], padding_value_int),
('obj_type', [1, None], anchor_padding_value_int),
('obj_clickable', [1, None], padding_value_int),
('obj_screen_pos', [1, None, 4], padding_value_int),
('obj_dom_pos', [1, None, 3], padding_value_int),
('obj_visible', [None], padding_value_int),
('obj_is_leaf', [None], padding_value_int),
('obj_is_padding', [None], 1),
]
padded_shapes = {}
padded_values = {}
for (key, padding_shape, padding_value) in padding_info:
padded_shapes[key] = padding_shape
padded_values[key] = padding_value
dataset = dataset.padded_batch(
batch_size, padded_shapes=padded_shapes, padding_values=padded_values)
dataset = dataset.prefetch(buffer_size=1024)
return dataset
| apache-2.0 | -1,863,721,539,412,627,700 | 38.351852 | 80 | 0.626824 | false |
jbedorf/tensorflow | tensorflow/python/platform/gfile.py | 16 | 3104 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import router for file_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.lib.io.file_io import copy as Copy
from tensorflow.python.lib.io.file_io import create_dir as MkDir
from tensorflow.python.lib.io.file_io import delete_file as Remove
from tensorflow.python.lib.io.file_io import delete_recursively as DeleteRecursively
from tensorflow.python.lib.io.file_io import file_exists as Exists
from tensorflow.python.lib.io.file_io import FileIO as _FileIO
from tensorflow.python.lib.io.file_io import get_matching_files as Glob
from tensorflow.python.lib.io.file_io import is_directory as IsDirectory
from tensorflow.python.lib.io.file_io import list_directory as ListDirectory
from tensorflow.python.lib.io.file_io import recursive_create_dir as MakeDirs
from tensorflow.python.lib.io.file_io import rename as Rename
from tensorflow.python.lib.io.file_io import stat as Stat
from tensorflow.python.lib.io.file_io import walk as Walk
# pylint: enable=unused-import
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
@tf_export('io.gfile.GFile', v1=['gfile.GFile', 'gfile.Open', 'io.gfile.GFile'])
class GFile(_FileIO):
"""File I/O wrappers without thread locking.
Note, that this is somewhat like builtin Python file I/O, but
there are semantic differences to make it more efficient for
some backing filesystems. For example, a write mode file will
not be opened until the first write call (to minimize RPC
invocations in network filesystems).
"""
def __init__(self, name, mode='r'):
super(GFile, self).__init__(name=name, mode=mode)
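# Minimal usage sketch (illustrative only, not part of the original module);
# the path is hypothetical, and per the class docstring a write-mode file is
# only opened on the first write call:
#
#   with GFile('/tmp/example.txt', 'w') as f:
#       f.write('hello')
#   contents = GFile('/tmp/example.txt').read()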
@tf_export(v1=['gfile.FastGFile'])
class FastGFile(_FileIO):
"""File I/O wrappers without thread locking.
Note, that this is somewhat like builtin Python file I/O, but
there are semantic differences to make it more efficient for
some backing filesystems. For example, a write mode file will
not be opened until the first write call (to minimize RPC
invocations in network filesystems).
"""
@deprecated(None, 'Use tf.gfile.GFile.')
def __init__(self, name, mode='r'):
super(FastGFile, self).__init__(name=name, mode=mode)
# Does not alias to Open so that we use our version of GFile to strip
# 'b' mode.
Open = GFile
| apache-2.0 | -9,055,754,543,865,819,000 | 41.520548 | 84 | 0.736469 | false |
dhenyjarasandy/scrapy | scrapy/settings/__init__.py | 124 | 6388 | import six
import json
import copy
import warnings
from collections import MutableMapping
from importlib import import_module
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.exceptions import ScrapyDeprecationWarning
from . import default_settings
SETTINGS_PRIORITIES = {
'default': 0,
'command': 10,
'project': 20,
'spider': 30,
'cmdline': 40,
}
class SettingsAttribute(object):
"""Class for storing data related to settings attributes.
This class is intended for internal usage, you should try Settings class
for settings configuration, not this one.
"""
def __init__(self, value, priority):
self.value = value
self.priority = priority
def set(self, value, priority):
"""Sets value if priority is higher or equal than current priority."""
if priority >= self.priority:
self.value = value
self.priority = priority
def __str__(self):
return "<SettingsAttribute value={self.value!r} " \
"priority={self.priority}>".format(self=self)
__repr__ = __str__
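# Illustrative sketch (not part of the original module) of how priority-based
# assignment behaves: the stored value is only overwritten when set with an
# equal or higher priority.
#
#   attr = SettingsAttribute('spider.example.org', SETTINGS_PRIORITIES['project'])
#   attr.set('cli.example.org', SETTINGS_PRIORITIES['cmdline'])   # applied (40 >= 20)
#   attr.set('def.example.org', SETTINGS_PRIORITIES['default'])   # ignored (0 < 40)
#   # attr.value == 'cli.example.org'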
class Settings(object):
def __init__(self, values=None, priority='project'):
self.frozen = False
self.attributes = {}
self.setmodule(default_settings, priority='default')
if values is not None:
self.setdict(values, priority)
def __getitem__(self, opt_name):
value = None
if opt_name in self.attributes:
value = self.attributes[opt_name].value
return value
def get(self, name, default=None):
return self[name] if self[name] is not None else default
def getbool(self, name, default=False):
"""
True is: 1, '1', True
False is: 0, '0', False, None
"""
return bool(int(self.get(name, default)))
def getint(self, name, default=0):
return int(self.get(name, default))
def getfloat(self, name, default=0.0):
return float(self.get(name, default))
def getlist(self, name, default=None):
value = self.get(name, default or [])
if isinstance(value, six.string_types):
value = value.split(',')
return list(value)
def getdict(self, name, default=None):
value = self.get(name, default or {})
if isinstance(value, six.string_types):
value = json.loads(value)
return dict(value)
def set(self, name, value, priority='project'):
self._assert_mutability()
if isinstance(priority, six.string_types):
priority = SETTINGS_PRIORITIES[priority]
if name not in self.attributes:
self.attributes[name] = SettingsAttribute(value, priority)
else:
self.attributes[name].set(value, priority)
def setdict(self, values, priority='project'):
self._assert_mutability()
for name, value in six.iteritems(values):
self.set(name, value, priority)
def setmodule(self, module, priority='project'):
self._assert_mutability()
if isinstance(module, six.string_types):
module = import_module(module)
for key in dir(module):
if key.isupper():
self.set(key, getattr(module, key), priority)
def _assert_mutability(self):
if self.frozen:
raise TypeError("Trying to modify an immutable Settings object")
def copy(self):
return copy.deepcopy(self)
def freeze(self):
self.frozen = True
def frozencopy(self):
copy = self.copy()
copy.freeze()
return copy
@property
def overrides(self):
warnings.warn("`Settings.overrides` attribute is deprecated and won't "
"be supported in Scrapy 0.26, use "
"`Settings.set(name, value, priority='cmdline')` instead",
category=ScrapyDeprecationWarning, stacklevel=2)
try:
o = self._overrides
except AttributeError:
self._overrides = o = _DictProxy(self, 'cmdline')
return o
@property
def defaults(self):
warnings.warn("`Settings.defaults` attribute is deprecated and won't "
"be supported in Scrapy 0.26, use "
"`Settings.set(name, value, priority='default')` instead",
category=ScrapyDeprecationWarning, stacklevel=2)
try:
o = self._defaults
except AttributeError:
self._defaults = o = _DictProxy(self, 'default')
return o
class _DictProxy(MutableMapping):
def __init__(self, settings, priority):
self.o = {}
self.settings = settings
self.priority = priority
def __len__(self):
return len(self.o)
def __getitem__(self, k):
return self.o[k]
def __setitem__(self, k, v):
self.settings.set(k, v, priority=self.priority)
self.o[k] = v
def __delitem__(self, k):
del self.o[k]
    def __iter__(self):
return iter(self.o)
class CrawlerSettings(Settings):
def __init__(self, settings_module=None, **kw):
Settings.__init__(self, **kw)
self.settings_module = settings_module
def __getitem__(self, opt_name):
if opt_name in self.overrides:
return self.overrides[opt_name]
if self.settings_module and hasattr(self.settings_module, opt_name):
return getattr(self.settings_module, opt_name)
if opt_name in self.defaults:
return self.defaults[opt_name]
return Settings.__getitem__(self, opt_name)
def __str__(self):
return "<CrawlerSettings module=%r>" % self.settings_module
CrawlerSettings = create_deprecated_class(
'CrawlerSettings', CrawlerSettings,
new_class_path='scrapy.settings.Settings')
def iter_default_settings():
"""Return the default settings as an iterator of (name, value) tuples"""
for name in dir(default_settings):
if name.isupper():
yield name, getattr(default_settings, name)
def overridden_settings(settings):
"""Return a dict of the settings that have been overridden"""
for name, defvalue in iter_default_settings():
value = settings[name]
if not isinstance(defvalue, dict) and value != defvalue:
yield name, value
| bsd-3-clause | -3,847,352,507,335,712,000 | 29.274882 | 80 | 0.605823 | false |
richardcs/ansible | lib/ansible/utils/listify.py | 100 | 1497 | # (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import string_types
from ansible.module_utils.common._collections_compat import Iterable
from ansible.template.safe_eval import safe_eval
__all__ = ['listify_lookup_plugin_terms']
def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=True, convert_bare=False):
if isinstance(terms, string_types):
terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined)
else:
terms = templar.template(terms, fail_on_undefined=fail_on_undefined)
if isinstance(terms, string_types) or not isinstance(terms, Iterable):
terms = [terms]
return terms
| gpl-3.0 | -1,302,617,522,038,712,000 | 36.425 | 111 | 0.744823 | false |
bmya/odoo_addons | smile_account_voucher_group/models/__init__.py | 3 | 1087 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import payment_method
import account_move_line
import account_invoice
import account_voucher
import res_partner | agpl-3.0 | -8,591,281,851,842,109,000 | 40.846154 | 78 | 0.624655 | false |
kvar/ansible | contrib/inventory/vmware.py | 37 | 18538 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
VMware Inventory Script
=======================
Retrieve information about virtual machines from a vCenter server or
standalone ESX host. When `guests_only=false` (in the INI file), host systems
are also returned in addition to VMs.
This script will attempt to read configuration from an INI file with the same
base filename if present, or `vmware.ini` if not. It is possible to create
symlinks to the inventory script to support multiple configurations, e.g.:
* `vmware.py` (this script)
* `vmware.ini` (default configuration, will be read by `vmware.py`)
* `vmware_test.py` (symlink to `vmware.py`)
* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`)
* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no
`vmware_other.ini` exists)
The path to an INI file may also be specified via the `VMWARE_INI` environment
variable, in which case the filename matching rules above will not apply.
Host and authentication parameters may be specified via the `VMWARE_HOST`,
`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will
take precedence over options present in the INI file. An INI file is not
required if these options are specified using environment variables.
'''
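# Illustrative example only -- a minimal `vmware.ini` covering the options read
# below (hostname and credentials are hypothetical):
#
# [auth]
# host = vcenter.example.com
# user = inventory_ro
# password = secret
# sslcheck = no
#
# [defaults]
# guests_only = true
# clusters = cluster01, cluster02
# cache_dir = ~/.cache/vmware_inventory
# cache_max_age = 300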
from __future__ import print_function
import json
import logging
import optparse
import os
import ssl
import sys
import time
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.module_utils.six import integer_types, text_type, string_types
from ansible.module_utils.six.moves import configparser
# Disable logging messages triggered by pSphere/suds.
try:
from logging import NullHandler
except ImportError:
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
logging.getLogger('psphere').addHandler(NullHandler())
logging.getLogger('suds').addHandler(NullHandler())
from psphere.client import Client
from psphere.errors import ObjectNotFoundError
from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network, ClusterComputeResource
from suds.sudsobject import Object as SudsObject
class VMwareInventory(object):
def __init__(self, guests_only=None):
self.config = configparser.SafeConfigParser()
if os.environ.get('VMWARE_INI', ''):
config_files = [os.environ['VMWARE_INI']]
else:
        config_files = [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'vmware.ini']
for config_file in config_files:
if os.path.exists(config_file):
self.config.read(config_file)
break
# Retrieve only guest VMs, or include host systems?
if guests_only is not None:
self.guests_only = guests_only
elif self.config.has_option('defaults', 'guests_only'):
self.guests_only = self.config.getboolean('defaults', 'guests_only')
else:
self.guests_only = True
# Read authentication information from VMware environment variables
# (if set), otherwise from INI file.
auth_host = os.environ.get('VMWARE_HOST')
if not auth_host and self.config.has_option('auth', 'host'):
auth_host = self.config.get('auth', 'host')
auth_user = os.environ.get('VMWARE_USER')
if not auth_user and self.config.has_option('auth', 'user'):
auth_user = self.config.get('auth', 'user')
auth_password = os.environ.get('VMWARE_PASSWORD')
if not auth_password and self.config.has_option('auth', 'password'):
auth_password = self.config.get('auth', 'password')
sslcheck = os.environ.get('VMWARE_SSLCHECK')
if not sslcheck and self.config.has_option('auth', 'sslcheck'):
sslcheck = self.config.get('auth', 'sslcheck')
if not sslcheck:
sslcheck = True
else:
if sslcheck.lower() in ['no', 'false']:
sslcheck = False
else:
sslcheck = True
# Limit the clusters being scanned
self.filter_clusters = os.environ.get('VMWARE_CLUSTERS')
if not self.filter_clusters and self.config.has_option('defaults', 'clusters'):
self.filter_clusters = self.config.get('defaults', 'clusters')
if self.filter_clusters:
self.filter_clusters = [x.strip() for x in self.filter_clusters.split(',') if x.strip()]
# Override certificate checks
if not sslcheck:
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# Create the VMware client connection.
self.client = Client(auth_host, auth_user, auth_password)
def _put_cache(self, name, value):
'''
Saves the value to cache with the name given.
'''
if self.config.has_option('defaults', 'cache_dir'):
cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_file = os.path.join(cache_dir, name)
with open(cache_file, 'w') as cache:
json.dump(value, cache)
def _get_cache(self, name, default=None):
'''
Retrieves the value from cache for the given name.
'''
if self.config.has_option('defaults', 'cache_dir'):
cache_dir = self.config.get('defaults', 'cache_dir')
cache_file = os.path.join(cache_dir, name)
if os.path.exists(cache_file):
if self.config.has_option('defaults', 'cache_max_age'):
cache_max_age = self.config.getint('defaults', 'cache_max_age')
else:
cache_max_age = 0
cache_stat = os.stat(cache_file)
if (cache_stat.st_mtime + cache_max_age) >= time.time():
with open(cache_file) as cache:
return json.load(cache)
return default
def _flatten_dict(self, d, parent_key='', sep='_'):
'''
Flatten nested dicts by combining keys with a separator. Lists with
only string items are included as is; any other lists are discarded.
'''
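        # Illustrative example (not from the original source), with the default
        # separator '_':
        #   {'config': {'cpu': {'count': 2}}, 'tags': ['a', 'b']}
        # flattens to
        #   {'config_cpu_count': 2, 'tags': ['a', 'b']}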
items = []
for k, v in d.items():
if k.startswith('_'):
continue
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, MutableMapping):
items.extend(self._flatten_dict(v, new_key, sep).items())
elif isinstance(v, (list, tuple)):
if all([isinstance(x, string_types) for x in v]):
items.append((new_key, v))
else:
items.append((new_key, v))
return dict(items)
def _get_obj_info(self, obj, depth=99, seen=None):
'''
Recursively build a data structure for the given pSphere object (depth
only applies to ManagedObject instances).
'''
seen = seen or set()
if isinstance(obj, ManagedObject):
try:
obj_unicode = text_type(getattr(obj, 'name'))
except AttributeError:
obj_unicode = ()
if obj in seen:
return obj_unicode
seen.add(obj)
if depth <= 0:
return obj_unicode
d = {}
for attr in dir(obj):
if attr.startswith('_'):
continue
try:
val = getattr(obj, attr)
obj_info = self._get_obj_info(val, depth - 1, seen)
if obj_info != ():
d[attr] = obj_info
except Exception as e:
pass
return d
elif isinstance(obj, SudsObject):
d = {}
for key, val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
d[key] = obj_info
return d
elif isinstance(obj, (list, tuple)):
l = []
for val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
l.append(obj_info)
return l
elif isinstance(obj, (type(None), bool, float) + string_types + integer_types):
return obj
else:
return ()
def _get_host_info(self, host, prefix='vmware'):
'''
Return a flattened dict with info about the given host system.
'''
host_info = {
'name': host.name,
}
for attr in ('datastore', 'network', 'vm'):
try:
value = getattr(host, attr)
host_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
host_info['%ss' % attr] = []
for k, v in self._get_obj_info(host.summary, depth=0).items():
if isinstance(v, MutableMapping):
for k2, v2 in v.items():
host_info[k2] = v2
elif k != 'host':
host_info[k] = v
try:
host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress
except Exception as e:
print(e, file=sys.stderr)
host_info = self._flatten_dict(host_info, prefix)
if ('%s_ipAddress' % prefix) in host_info:
host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix]
return host_info
def _get_vm_info(self, vm, prefix='vmware'):
'''
Return a flattened dict with info about the given virtual machine.
'''
vm_info = {
'name': vm.name,
}
for attr in ('datastore', 'network'):
try:
value = getattr(vm, attr)
vm_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
vm_info['%ss' % attr] = []
try:
vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0)
except AttributeError:
vm_info['resourcePool'] = ''
try:
vm_info['guestState'] = vm.guest.guestState
except AttributeError:
vm_info['guestState'] = ''
for k, v in self._get_obj_info(vm.summary, depth=0).items():
if isinstance(v, MutableMapping):
for k2, v2 in v.items():
if k2 == 'host':
k2 = 'hostSystem'
vm_info[k2] = v2
elif k != 'vm':
vm_info[k] = v
vm_info = self._flatten_dict(vm_info, prefix)
if ('%s_ipAddress' % prefix) in vm_info:
vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix]
return vm_info
def _add_host(self, inv, parent_group, host_name):
'''
Add the host to the parent group in the given inventory.
'''
p_group = inv.setdefault(parent_group, [])
if isinstance(p_group, dict):
group_hosts = p_group.setdefault('hosts', [])
else:
group_hosts = p_group
if host_name not in group_hosts:
group_hosts.append(host_name)
def _add_child(self, inv, parent_group, child_group):
'''
Add a child group to a parent group in the given inventory.
'''
if parent_group != 'all':
p_group = inv.setdefault(parent_group, {})
if not isinstance(p_group, dict):
inv[parent_group] = {'hosts': p_group}
p_group = inv[parent_group]
group_children = p_group.setdefault('children', [])
if child_group not in group_children:
group_children.append(child_group)
inv.setdefault(child_group, [])
def get_inventory(self, meta_hostvars=True):
'''
Reads the inventory from cache or VMware API via pSphere.
'''
# Use different cache names for guests only vs. all hosts.
if self.guests_only:
cache_name = '__inventory_guests__'
else:
cache_name = '__inventory_all__'
inv = self._get_cache(cache_name, None)
if inv is not None:
return inv
inv = {'all': {'hosts': []}}
if meta_hostvars:
inv['_meta'] = {'hostvars': {}}
        default_group = os.path.splitext(os.path.basename(sys.argv[0]))[0]
if not self.guests_only:
if self.config.has_option('defaults', 'hw_group'):
hw_group = self.config.get('defaults', 'hw_group')
else:
hw_group = default_group + '_hw'
if self.config.has_option('defaults', 'vm_group'):
vm_group = self.config.get('defaults', 'vm_group')
else:
vm_group = default_group + '_vm'
if self.config.has_option('defaults', 'prefix_filter'):
prefix_filter = self.config.get('defaults', 'prefix_filter')
else:
prefix_filter = None
if self.filter_clusters:
# Loop through clusters and find hosts:
hosts = []
for cluster in ClusterComputeResource.all(self.client):
if cluster.name in self.filter_clusters:
for host in cluster.host:
hosts.append(host)
else:
# Get list of all physical hosts
hosts = HostSystem.all(self.client)
# Loop through physical hosts:
for host in hosts:
if not self.guests_only:
self._add_host(inv, 'all', host.name)
self._add_host(inv, hw_group, host.name)
host_info = self._get_host_info(host)
if meta_hostvars:
inv['_meta']['hostvars'][host.name] = host_info
self._put_cache(host.name, host_info)
# Loop through all VMs on physical host.
for vm in host.vm:
if prefix_filter:
if vm.name.startswith(prefix_filter):
continue
self._add_host(inv, 'all', vm.name)
self._add_host(inv, vm_group, vm.name)
vm_info = self._get_vm_info(vm)
if meta_hostvars:
inv['_meta']['hostvars'][vm.name] = vm_info
self._put_cache(vm.name, vm_info)
# Group by resource pool.
vm_resourcePool = vm_info.get('vmware_resourcePool', None)
if vm_resourcePool:
self._add_child(inv, vm_group, 'resource_pools')
self._add_child(inv, 'resource_pools', vm_resourcePool)
self._add_host(inv, vm_resourcePool, vm.name)
# Group by datastore.
for vm_datastore in vm_info.get('vmware_datastores', []):
self._add_child(inv, vm_group, 'datastores')
self._add_child(inv, 'datastores', vm_datastore)
self._add_host(inv, vm_datastore, vm.name)
# Group by network.
for vm_network in vm_info.get('vmware_networks', []):
self._add_child(inv, vm_group, 'networks')
self._add_child(inv, 'networks', vm_network)
self._add_host(inv, vm_network, vm.name)
# Group by guest OS.
vm_guestId = vm_info.get('vmware_guestId', None)
if vm_guestId:
self._add_child(inv, vm_group, 'guests')
self._add_child(inv, 'guests', vm_guestId)
self._add_host(inv, vm_guestId, vm.name)
# Group all VM templates.
vm_template = vm_info.get('vmware_template', False)
if vm_template:
self._add_child(inv, vm_group, 'templates')
self._add_host(inv, 'templates', vm.name)
self._put_cache(cache_name, inv)
return inv
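    # Rough sketch of the structure get_inventory() returns (group and host
    # names below are hypothetical; actual groups depend on vCenter contents
    # and on the hw_group/vm_group settings):
    #
    #   {
    #     "all": {"hosts": ["esx01", "vm01"]},
    #     "vmware_hw": ["esx01"],
    #     "vmware_vm": {"hosts": ["vm01"],
    #                   "children": ["datastores", "networks", "guests"]},
    #     "datastores": {"children": ["datastore1"]},
    #     "datastore1": ["vm01"],
    #     "_meta": {"hostvars": {"vm01": {"ansible_ssh_host": "10.0.0.5"}}}
    #   }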
def get_host(self, hostname):
'''
Read info about a specific host or VM from cache or VMware API.
'''
inv = self._get_cache(hostname, None)
if inv is not None:
return inv
if not self.guests_only:
try:
host = HostSystem.get(self.client, name=hostname)
inv = self._get_host_info(host)
except ObjectNotFoundError:
pass
if inv is None:
try:
vm = VirtualMachine.get(self.client, name=hostname)
inv = self._get_vm_info(vm)
except ObjectNotFoundError:
pass
if inv is not None:
self._put_cache(hostname, inv)
return inv or {}
def main():
parser = optparse.OptionParser()
parser.add_option('--list', action='store_true', dest='list',
default=False, help='Output inventory groups and hosts')
parser.add_option('--host', dest='host', default=None, metavar='HOST',
help='Output variables only for the given hostname')
# Additional options for use when running the script standalone, but never
# used by Ansible.
parser.add_option('--pretty', action='store_true', dest='pretty',
default=False, help='Output nicely-formatted JSON')
parser.add_option('--include-host-systems', action='store_true',
dest='include_host_systems', default=False,
help='Include host systems in addition to VMs')
parser.add_option('--no-meta-hostvars', action='store_false',
dest='meta_hostvars', default=True,
help='Exclude [\'_meta\'][\'hostvars\'] with --list')
options, args = parser.parse_args()
if options.include_host_systems:
vmware_inventory = VMwareInventory(guests_only=False)
else:
vmware_inventory = VMwareInventory()
if options.host is not None:
inventory = vmware_inventory.get_host(options.host)
else:
inventory = vmware_inventory.get_inventory(options.meta_hostvars)
json_kwargs = {}
if options.pretty:
json_kwargs.update({'indent': 4, 'sort_keys': True})
json.dump(inventory, sys.stdout, **json_kwargs)
if __name__ == '__main__':
main()
| gpl-3.0 | 320,618,237,562,737,800 | 38.275424 | 109 | 0.552379 | false |
trondeau/gnuradio-old | gr-filter/python/filter/design/fir_design.py | 11 | 15888 | # Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import scipy
from gnuradio import filter
from PyQt4 import QtGui
# Filter design functions using a window
def design_win_lpf(fs, gain, wintype, mainwin):
ret = True
pb,r = mainwin.gui.endofLpfPassBandEdit.text().toDouble()
ret = r and ret
sb,r = mainwin.gui.startofLpfStopBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.lpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
tb = sb - pb
try:
taps = filter.firdes.low_pass_2(gain, fs, pb, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "lpf", "pbend": pb, "sbstart": sb,
"atten": atten, "ntaps": len(taps)}
return (taps, params, ret)
else:
return ([], [], ret)
def design_win_bpf(fs, gain, wintype, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.firdes.band_pass_2(gain, fs, pb1, pb2, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "bpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ntaps": len(taps)}
return (taps,params,r)
else:
return ([],[],ret)
def design_win_cbpf(fs, gain, wintype, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.firdes.complex_band_pass_2(gain, fs, pb1, pb2, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "cbpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ntaps": len(taps)}
return (taps,params,r)
else:
return ([],[],ret)
def design_win_bnf(fs, gain, wintype, mainwin):
ret = True
pb1,r = mainwin.gui.startofBnfStopBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBnfStopBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bnfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bnfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.firdes.band_reject_2(gain, fs, pb1, pb2, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "bnf", "sbstart": pb1, "sbend": pb2,
"tb": tb, "atten": atten, "ntaps": len(taps)}
return (taps,params,r)
else:
return ([],[],ret)
def design_win_hpf(fs, gain, wintype, mainwin):
ret = True
sb,r = mainwin.gui.endofHpfStopBandEdit.text().toDouble()
ret = r and ret
pb,r = mainwin.gui.startofHpfPassBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.hpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
tb = pb - sb
try:
taps = filter.firdes.high_pass_2(gain, fs, pb, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "hpf", "sbend": sb, "pbstart": pb,
"atten": atten, "ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
def design_win_hb(fs, gain, wintype, mainwin):
ret = True
filtord,r = mainwin.gui.firhbordEdit.text().toDouble()
ret = r and ret
trwidth,r = mainwin.gui.firhbtrEdit.text().toDouble()
ret = r and ret
filtwin = { filter.firdes.WIN_HAMMING : 'hamming',
filter.firdes.WIN_HANN : 'hanning',
filter.firdes.WIN_BLACKMAN : 'blackman',
filter.firdes.WIN_RECTANGULAR: 'boxcar',
filter.firdes.WIN_KAISER: ('kaiser', 4.0),
filter.firdes.WIN_BLACKMAN_hARRIS: 'blackmanharris'}
if int(filtord) & 1:
reply = QtGui.QMessageBox.information(mainwin, "Filter order should be even",
"Filter order should be even","&Ok")
return ([],[],False)
if(ret):
taps = scipy.signal.firwin(int(filtord)+1, 0.5, window = filtwin[wintype])
taps[abs(taps) <= 1e-6] = 0.
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "hb","ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
def design_win_rrc(fs, gain, wintype, mainwin):
ret = True
sr,r = mainwin.gui.rrcSymbolRateEdit.text().toDouble()
ret = r and ret
alpha,r = mainwin.gui.rrcAlphaEdit.text().toDouble()
ret = r and ret
ntaps,r = mainwin.gui.rrcNumTapsEdit.text().toInt()
ret = r and ret
if(ret):
try:
taps = filter.firdes.root_raised_cosine(gain, fs, sr,
alpha, ntaps)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "rrc", "srate": sr, "alpha": alpha,
"ntaps": ntaps}
return (taps,params,ret)
else:
return ([],[],ret)
def design_win_gaus(fs, gain, wintype, mainwin):
ret = True
sr,r = mainwin.gui.gausSymbolRateEdit.text().toDouble()
ret = r and ret
bt,r = mainwin.gui.gausBTEdit.text().toDouble()
ret = r and ret
ntaps,r = mainwin.gui.gausNumTapsEdit.text().toInt()
ret = r and ret
if(ret):
spb = fs / sr
try:
taps = filter.firdes.gaussian(gain, spb, bt, ntaps)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "gaus", "srate": sr, "bt": bt,
"ntaps": ntaps}
return (taps,params,ret)
else:
return ([],[],ret)
# Design Functions for Equiripple Filters
def design_opt_lpf(fs, gain, mainwin):
ret = True
pb,r = mainwin.gui.endofLpfPassBandEdit.text().toDouble()
ret = r and ret
sb,r = mainwin.gui.startofLpfStopBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.lpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.lpfPassBandRippleEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.optfir.low_pass(gain, fs, pb, sb,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": mainwin.EQUIRIPPLE_FILT,
"filttype": "lpf", "pbend": pb, "sbstart": sb,
"atten": atten, "ripple": ripple, "ntaps": len(taps)}
return (taps, params, ret)
else:
return ([], [], ret)
def design_opt_bpf(fs, gain, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.bpfPassBandRippleEdit.text().toDouble()
ret = r and ret
if(r):
sb1 = pb1 - tb
sb2 = pb2 + tb
try:
taps = filter.optfir.band_pass(gain, fs, sb1, pb1, pb2, sb2,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": mainwin.EQUIRIPPLE_FILT,
"filttype": "bpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ripple": ripple,
"ntaps": len(taps)}
return (taps,params,r)
else:
return ([],[],r)
def design_opt_cbpf(fs, gain, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.bpfPassBandRippleEdit.text().toDouble()
ret = r and ret
    if(ret):
sb1 = pb1 - tb
sb2 = pb2 + tb
try:
taps = filter.optfir.complex_band_pass(gain, fs, sb1, pb1, pb2, sb2,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": self.EQUIRIPPLE_FILT,
"filttype": "cbpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ripple": ripple,
"ntaps": len(taps)}
            return (taps,params,ret)
else:
        return ([],[],ret)
def design_opt_bnf(fs, gain, mainwin):
ret = True
sb1,r = mainwin.gui.startofBnfStopBandEdit.text().toDouble()
ret = r and ret
sb2,r = mainwin.gui.endofBnfStopBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bnfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bnfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.bnfPassBandRippleEdit.text().toDouble()
ret = r and ret
if(ret):
pb1 = sb1 - tb
pb2 = sb2 + tb
try:
taps = filter.optfir.band_reject(gain, fs, pb1, sb1, sb2, pb2,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": mainwin.EQUIRIPPLE_FILT,
"filttype": "bnf", "sbstart": pb1, "sbend": pb2,
"tb": tb, "atten": atten, "ripple": ripple,
"ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
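# Half-band design via remez: the order must be even (checked below), the bands
# are placed symmetrically around fs/4, and taps with magnitude <= 1e-6 are
# snapped to exactly zero, as expected of a half-band filter.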
def design_opt_hb(fs, gain, mainwin):
ret = True
filtord,r = mainwin.gui.firhbordEdit.text().toDouble()
ret = r and ret
trwidth,r = mainwin.gui.firhbtrEdit.text().toDouble()
ret = r and ret
if int(filtord) & 1:
reply = QtGui.QMessageBox.information(mainwin, "Filter order should be even",
"Filter order should be even","&Ok")
return ([],[],False)
if(ret):
try:
bands = [0,.25 - (trwidth/fs), .25 + (trwidth/fs), 0.5]
taps = scipy.signal.remez(int(filtord)+1, bands, [1,0], [1,1])
taps[abs(taps) <= 1e-6] = 0.
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter Design Error",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": self.EQUIRIPPLE_FILT,
"filttype": "hb", "ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
def design_opt_hpf(fs, gain, mainwin):
ret = True
sb,r = mainwin.gui.endofHpfStopBandEdit.text().toDouble()
ret = r and ret
pb,r = mainwin.gui.startofHpfPassBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.hpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.hpfPassBandRippleEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.optfir.high_pass(gain, fs, sb, pb,
atten, ripple)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": self.EQUIRIPPLE_FILT,
"filttype": "hpf", "sbend": sb, "pbstart": pb,
"atten": atten, "ripple": ripple,
"ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
| gpl-3.0 | 7,824,394,102,915,052,000 | 37.563107 | 85 | 0.52291 | false |
wimmuskee/mangrove | mangrove_libs/common.py | 1 | 3122 | # -*- coding: utf-8 -*-
"""
This module contains common functions
for the mangrove crawler.
Wim Muskee, 2013-2018
[email protected]
License: GPL-3
"""
def getConfig(configfile,section):
import json
with open(configfile, "r") as f:
configdata = json.loads(f.read())
config = {}
config.update(configdata["common"])
config.update(configdata[section])
config["configuration"] = section
return config
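# Example usage (path, section name and config keys below are illustrative only):
#   config = getConfig("/etc/mangrove/config.json", "harvester")
#   proxy = getRequestsProxy(config["proxy_host"], config["proxy_port"])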
""" Dynamically import a method """
def import_from(module, name):
import importlib
module = __import__(module, fromlist=[name])
return getattr(module, name)
""" Download a file using chunks to deal with large files. Disable default compression handling. """
def downloadFile(httpProxy,source,dest):
import requests
headers = {"Accept-Encoding": "identity"}
r = requests.get(source, stream=True, proxies=httpProxy, headers=headers)
with open(dest, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
def gzUnpack(source,dest):
import gzip
f = gzip.open(source,'rb')
output = open(dest,'wb')
output.write(f.read())
output.close()
def bz2Unpack(source,dest):
from bz2 import BZ2File
f = BZ2File( source, 'r')
output = open(dest,'wb')
output.write(f.read())
output.close()
def checkLocal():
from os import path, getcwd
if path.isdir( getcwd() + "/share" ):
		return True
else:
		return False
def getHttplib2Proxy(proxy_host,proxy_port):
import httplib2
import socks
return httplib2.Http(proxy_info = httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP, proxy_host, int(proxy_port), False))
# Not actively used; keeping it around just in case.
def getUrllib2Proxy(proxy_host,proxy_port):
import urllib2
return urllib2.ProxyHandler({"http": proxy_host + ":" + proxy_port})
def getRequestsProxy(proxy_host,proxy_port):
return { "http": proxy_host + ":" + proxy_port }
""" Return path of program if exists, http://stackoverflow.com/a/377028/426990 """
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
""" Quit when one of the programs is not found """
def checkPrograms(programlist):
for p in programlist:
if not which(p):
raise RuntimeError( "executable does not exist: " + p )
""" return simple logger object """
def getLogger(application):
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)
return logging.getLogger(application)
""" from for instance 2012-10-23T16:39:06Z """
def getTimestampFromZuluDT(dt):
from datetime import datetime
return int((datetime.strptime( dt, "%Y-%m-%dT%H:%M:%SZ") - datetime(1970, 1, 1)).total_seconds())
""" pretty printer for debug """
def prettyPrint(data):
import pprint
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(data)
| gpl-3.0 | -1,825,093,449,538,365,000 | 23.582677 | 113 | 0.704676 | false |
akd001/RPi | testRainbow.py | 1 | 1361 | import os, sys, subprocess, time
from rainbowhatwrapper.handlers import *
#CONSTANTS
BUTTON_A_STATE = False
BUTTON_B_STATE = False
BUTTON_C_STATE = False
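# NOTE: the BUTTON_*_STATE flags above are placeholders; nothing below reads them.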
def showUptime():
while True:
test = subprocess.Popen(["uptime"], stdout=subprocess.PIPE)
output = test.communicate()[0].split()[0].split(':')
hour = output[0]
mins = output[1]
RhDisplayHandler.printOnDisplay(hour + mins)
time.sleep(15)
def main():
RhPixelHandler.setPixel(0, 1, 1, 1)
RhPixelHandler.setPixel(1, 1, 1, 1)
RhPixelHandler.setPixel(2, 1, 1, 1)
RhPixelHandler.setPixel(3, 1, 1, 1)
RhPixelHandler.setPixel(4, 1, 1, 1)
RhPixelHandler.setPixel(5, 1, 1, 1)
RhPixelHandler.setPixel(6, 1, 1, 1)
RhBuzzerHandler.playBeginning()
# showUptime()
# song = [68, 68, 68, 69, 70, 70, 69, 70, 71, 72]
# for note in song:
# RhBuzzerHandler.playMidi(note, 0.5)
# time.sleep(1)
# RhBuzzerHandler.play(261, 1)
# print (RhWeatherHandler.getTemperature())
# print (RhWeatherHandler.getPressure())
RhDisplayHandler.printOnDisplay("hello.world.")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print ('Interrupted!')
try:
RhDisplayHandler.clear()
sys.exit(0)
except SystemExit:
os._exit(0)
| mit | 5,096,596,720,794,866,000 | 27.354167 | 67 | 0.613519 | false |
IntelLabs/numba | numba/tests/test_sort.py | 7 | 33874 | import copy
import itertools
import math
import random
import sys
import numpy as np
from numba.core.compiler import compile_isolated, Flags
from numba import jit, njit
from numba.core import types, utils, errors
import unittest
from numba import testing
from numba.tests.support import TestCase, MemoryLeakMixin, tag
from numba.misc.quicksort import make_py_quicksort, make_jit_quicksort
from numba.misc.mergesort import make_jit_mergesort
from numba.misc.timsort import make_py_timsort, make_jit_timsort, MergeRun
def make_temp_list(keys, n):
return [keys[0]] * n
def make_temp_array(keys, n):
return np.empty(n, keys.dtype)
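# Build pure-Python and JIT-compiled variants of each sort so the same test
# cases can exercise both code paths.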
py_list_timsort = make_py_timsort(make_temp_list)
py_array_timsort = make_py_timsort(make_temp_array)
jit_list_timsort = make_jit_timsort(make_temp_list)
jit_array_timsort = make_jit_timsort(make_temp_array)
py_quicksort = make_py_quicksort()
jit_quicksort = make_jit_quicksort()
def sort_usecase(val):
val.sort()
def argsort_usecase(val):
return val.argsort()
def argsort_kind_usecase(val, is_stable=False):
if is_stable:
return val.argsort(kind='mergesort')
else:
return val.argsort(kind='quicksort')
def sorted_usecase(val):
return sorted(val)
def sorted_reverse_usecase(val, b):
return sorted(val, reverse=b)
def np_sort_usecase(val):
return np.sort(val)
def np_argsort_usecase(val):
return np.argsort(val)
def np_argsort_kind_usecase(val, is_stable=False):
if is_stable:
return np.argsort(val, kind='mergesort')
else:
return np.argsort(val, kind='quicksort')
def list_sort_usecase(n):
np.random.seed(42)
l = []
for i in range(n):
l.append(np.random.random())
ll = l[:]
ll.sort()
return l, ll
def list_sort_reverse_usecase(n, b):
np.random.seed(42)
l = []
for i in range(n):
l.append(np.random.random())
ll = l[:]
ll.sort(reverse=b)
return l, ll
class BaseSortingTest(object):
def random_list(self, n, offset=10):
random.seed(42)
l = list(range(offset, offset + n))
random.shuffle(l)
return l
def sorted_list(self, n, offset=10):
return list(range(offset, offset + n))
def revsorted_list(self, n, offset=10):
return list(range(offset, offset + n))[::-1]
def initially_sorted_list(self, n, m=None, offset=10):
if m is None:
m = n // 2
l = self.sorted_list(m, offset)
l += self.random_list(n - m, offset=l[-1] + offset)
return l
def duprandom_list(self, n, factor=None, offset=10):
random.seed(42)
if factor is None:
factor = int(math.sqrt(n))
l = (list(range(offset, offset + (n // factor) + 1)) * (factor + 1))[:n]
assert len(l) == n
random.shuffle(l)
return l
def dupsorted_list(self, n, factor=None, offset=10):
if factor is None:
factor = int(math.sqrt(n))
l = (list(range(offset, offset + (n // factor) + 1)) * (factor + 1))[:n]
assert len(l) == n, (len(l), n)
l.sort()
return l
def assertSorted(self, orig, result):
self.assertEqual(len(result), len(orig))
# sorted() returns a list, so make sure we compare to another list
self.assertEqual(list(result), sorted(orig))
def assertSortedValues(self, orig, orig_values, result, result_values):
self.assertEqual(len(result), len(orig))
self.assertEqual(list(result), sorted(orig))
zip_sorted = sorted(zip(orig, orig_values), key=lambda x: x[0])
zip_result = list(zip(result, result_values))
self.assertEqual(zip_sorted, zip_result)
# Check stability
for i in range(len(zip_result) - 1):
(k1, v1), (k2, v2) = zip_result[i], zip_result[i + 1]
if k1 == k2:
# Assuming values are unique, which is enforced by the tests
self.assertLess(orig_values.index(v1), orig_values.index(v2))
def fibo(self):
a = 1
b = 1
while True:
yield a
a, b = b, a + b
def make_sample_sorted_lists(self, n):
lists = []
for offset in (20, 120):
lists.append(self.sorted_list(n, offset))
lists.append(self.dupsorted_list(n, offset))
return lists
def make_sample_lists(self, n):
lists = []
for offset in (20, 120):
lists.append(self.sorted_list(n, offset))
lists.append(self.dupsorted_list(n, offset))
lists.append(self.revsorted_list(n, offset))
lists.append(self.duprandom_list(n, offset))
return lists
class BaseTimsortTest(BaseSortingTest):
def merge_init(self, keys):
f = self.timsort.merge_init
return f(keys)
def test_binarysort(self):
n = 20
def check(l, n, start=0):
res = self.array_factory(l)
f(res, res, 0, n, start)
self.assertSorted(l, res)
f = self.timsort.binarysort
l = self.sorted_list(n)
check(l, n)
check(l, n, n//2)
l = self.revsorted_list(n)
check(l, n)
l = self.initially_sorted_list(n, n//2)
check(l, n)
check(l, n, n//2)
l = self.revsorted_list(n)
check(l, n)
l = self.random_list(n)
check(l, n)
l = self.duprandom_list(n)
check(l, n)
def test_binarysort_with_values(self):
n = 20
v = list(range(100, 100+n))
def check(l, n, start=0):
res = self.array_factory(l)
res_v = self.array_factory(v)
f(res, res_v, 0, n, start)
self.assertSortedValues(l, v, res, res_v)
f = self.timsort.binarysort
l = self.sorted_list(n)
check(l, n)
check(l, n, n//2)
l = self.revsorted_list(n)
check(l, n)
l = self.initially_sorted_list(n, n//2)
check(l, n)
check(l, n, n//2)
l = self.revsorted_list(n)
check(l, n)
l = self.random_list(n)
check(l, n)
l = self.duprandom_list(n)
check(l, n)
def test_count_run(self):
n = 16
f = self.timsort.count_run
def check(l, lo, hi):
n, desc = f(self.array_factory(l), lo, hi)
# Fully check invariants
if desc:
for k in range(lo, lo + n - 1):
a, b = l[k], l[k + 1]
self.assertGreater(a, b)
if lo + n < hi:
self.assertLessEqual(l[lo + n - 1], l[lo + n])
else:
for k in range(lo, lo + n - 1):
a, b = l[k], l[k + 1]
self.assertLessEqual(a, b)
if lo + n < hi:
self.assertGreater(l[lo + n - 1], l[lo + n], l)
l = self.sorted_list(n, offset=100)
check(l, 0, n)
check(l, 1, n - 1)
check(l, 1, 2)
l = self.revsorted_list(n, offset=100)
check(l, 0, n)
check(l, 1, n - 1)
check(l, 1, 2)
l = self.random_list(n, offset=100)
for i in range(len(l) - 1):
check(l, i, n)
l = self.duprandom_list(n, offset=100)
for i in range(len(l) - 1):
check(l, i, n)
def test_gallop_left(self):
n = 20
f = self.timsort.gallop_left
def check(l, key, start, stop, hint):
k = f(key, l, start, stop, hint)
# Fully check invariants
self.assertGreaterEqual(k, start)
self.assertLessEqual(k, stop)
if k > start:
self.assertLess(l[k - 1], key)
if k < stop:
self.assertGreaterEqual(l[k], key)
def check_all_hints(l, key, start, stop):
for hint in range(start, stop):
check(l, key, start, stop, hint)
def check_sorted_list(l):
l = self.array_factory(l)
for key in (l[5], l[15], l[0], -1000, l[-1], 1000):
check_all_hints(l, key, 0, n)
check_all_hints(l, key, 1, n - 1)
check_all_hints(l, key, 8, n - 8)
l = self.sorted_list(n, offset=100)
check_sorted_list(l)
l = self.dupsorted_list(n, offset=100)
check_sorted_list(l)
def test_gallop_right(self):
n = 20
f = self.timsort.gallop_right
def check(l, key, start, stop, hint):
k = f(key, l, start, stop, hint)
# Fully check invariants
self.assertGreaterEqual(k, start)
self.assertLessEqual(k, stop)
if k > start:
self.assertLessEqual(l[k - 1], key)
if k < stop:
self.assertGreater(l[k], key)
def check_all_hints(l, key, start, stop):
for hint in range(start, stop):
check(l, key, start, stop, hint)
def check_sorted_list(l):
l = self.array_factory(l)
for key in (l[5], l[15], l[0], -1000, l[-1], 1000):
check_all_hints(l, key, 0, n)
check_all_hints(l, key, 1, n - 1)
check_all_hints(l, key, 8, n - 8)
l = self.sorted_list(n, offset=100)
check_sorted_list(l)
l = self.dupsorted_list(n, offset=100)
check_sorted_list(l)
def test_merge_compute_minrun(self):
f = self.timsort.merge_compute_minrun
for i in range(0, 64):
self.assertEqual(f(i), i)
for i in range(6, 63):
if 2**i > sys.maxsize:
break
self.assertEqual(f(2**i), 32)
for i in self.fibo():
if i < 64:
continue
if i >= sys.maxsize:
break
k = f(i)
self.assertGreaterEqual(k, 32)
self.assertLessEqual(k, 64)
if i > 500:
# i/k is close to, but strictly less than, an exact power of 2
quot = i // k
p = 2 ** utils.bit_length(quot)
self.assertLess(quot, p)
self.assertGreaterEqual(quot, 0.9 * p)
def check_merge_lo_hi(self, func, a, b):
na = len(a)
nb = len(b)
# Add sentinels at start and end, to check they weren't moved
orig_keys = [42] + a + b + [-42]
keys = self.array_factory(orig_keys)
ms = self.merge_init(keys)
ssa = 1
ssb = ssa + na
#new_ms = func(ms, keys, [], ssa, na, ssb, nb)
new_ms = func(ms, keys, keys, ssa, na, ssb, nb)
self.assertEqual(keys[0], orig_keys[0])
self.assertEqual(keys[-1], orig_keys[-1])
self.assertSorted(orig_keys[1:-1], keys[1:-1])
# Check the MergeState result
self.assertGreaterEqual(len(new_ms.keys), len(ms.keys))
self.assertGreaterEqual(len(new_ms.values), len(ms.values))
self.assertIs(new_ms.pending, ms.pending)
self.assertGreaterEqual(new_ms.min_gallop, 1)
def test_merge_lo_hi(self):
f_lo = self.timsort.merge_lo
f_hi = self.timsort.merge_hi
# The larger sizes exercise galloping
for (na, nb) in [(12, 16), (40, 40), (100, 110), (1000, 1100)]:
for a, b in itertools.product(self.make_sample_sorted_lists(na),
self.make_sample_sorted_lists(nb)):
self.check_merge_lo_hi(f_lo, a, b)
self.check_merge_lo_hi(f_hi, b, a)
def check_merge_at(self, a, b):
f = self.timsort.merge_at
# Prepare the array to be sorted
na = len(a)
nb = len(b)
# Add sentinels at start and end, to check they weren't moved
orig_keys = [42] + a + b + [-42]
ssa = 1
ssb = ssa + na
stack_sentinel = MergeRun(-42, -42)
def run_merge_at(ms, keys, i):
new_ms = f(ms, keys, keys, i)
self.assertEqual(keys[0], orig_keys[0])
self.assertEqual(keys[-1], orig_keys[-1])
self.assertSorted(orig_keys[1:-1], keys[1:-1])
# Check stack state
self.assertIs(new_ms.pending, ms.pending)
self.assertEqual(ms.pending[i], (ssa, na + nb))
self.assertEqual(ms.pending[0], stack_sentinel)
return new_ms
# First check with i == len(stack) - 2
keys = self.array_factory(orig_keys)
ms = self.merge_init(keys)
        # Push sentinel on stack, to check it wasn't touched
ms = self.timsort.merge_append(ms, stack_sentinel)
i = ms.n
ms = self.timsort.merge_append(ms, MergeRun(ssa, na))
ms = self.timsort.merge_append(ms, MergeRun(ssb, nb))
ms = run_merge_at(ms, keys, i)
self.assertEqual(ms.n, i + 1)
# Now check with i == len(stack) - 3
keys = self.array_factory(orig_keys)
ms = self.merge_init(keys)
        # Push sentinel on stack, to check it wasn't touched
ms = self.timsort.merge_append(ms, stack_sentinel)
i = ms.n
ms = self.timsort.merge_append(ms, MergeRun(ssa, na))
ms = self.timsort.merge_append(ms, MergeRun(ssb, nb))
# A last run (trivial here)
last_run = MergeRun(ssb + nb, 1)
ms = self.timsort.merge_append(ms, last_run)
ms = run_merge_at(ms, keys, i)
self.assertEqual(ms.n, i + 2)
self.assertEqual(ms.pending[ms.n - 1], last_run)
def test_merge_at(self):
# The larger sizes exercise galloping
for (na, nb) in [(12, 16), (40, 40), (100, 110), (500, 510)]:
for a, b in itertools.product(self.make_sample_sorted_lists(na),
self.make_sample_sorted_lists(nb)):
self.check_merge_at(a, b)
self.check_merge_at(b, a)
def test_merge_force_collapse(self):
f = self.timsort.merge_force_collapse
# Test with runs of ascending sizes, then descending sizes
sizes_list = [(8, 10, 15, 20)]
sizes_list.append(sizes_list[0][::-1])
for sizes in sizes_list:
for chunks in itertools.product(*(self.make_sample_sorted_lists(n)
for n in sizes)):
# Create runs of the given sizes
orig_keys = sum(chunks, [])
keys = self.array_factory(orig_keys)
ms = self.merge_init(keys)
pos = 0
for c in chunks:
ms = self.timsort.merge_append(ms, MergeRun(pos, len(c)))
pos += len(c)
# Sanity check
self.assertEqual(sum(ms.pending[ms.n - 1]), len(keys))
# Now merge the runs
ms = f(ms, keys, keys)
# Remaining run is the whole list
self.assertEqual(ms.n, 1)
self.assertEqual(ms.pending[0], MergeRun(0, len(keys)))
# The list is now sorted
self.assertSorted(orig_keys, keys)
def test_run_timsort(self):
f = self.timsort.run_timsort
for size_factor in (1, 10):
# Make lists to be sorted from three chunks of different kinds.
sizes = (15, 30, 20)
all_lists = [self.make_sample_lists(n * size_factor) for n in sizes]
for chunks in itertools.product(*all_lists):
orig_keys = sum(chunks, [])
keys = self.array_factory(orig_keys)
f(keys)
# The list is now sorted
self.assertSorted(orig_keys, keys)
def test_run_timsort_with_values(self):
# Run timsort, but also with a values array
f = self.timsort.run_timsort_with_values
for size_factor in (1, 5):
chunk_size = 80 * size_factor
a = self.dupsorted_list(chunk_size)
b = self.duprandom_list(chunk_size)
c = self.revsorted_list(chunk_size)
orig_keys = a + b + c
orig_values = list(range(1000, 1000 + len(orig_keys)))
keys = self.array_factory(orig_keys)
values = self.array_factory(orig_values)
f(keys, values)
# This checks sort stability
self.assertSortedValues(orig_keys, orig_values, keys, values)
class TestTimsortPurePython(BaseTimsortTest, TestCase):
timsort = py_list_timsort
# Much faster than a Numpy array in pure Python
array_factory = list
class TestTimsortArraysPurePython(BaseTimsortTest, TestCase):
timsort = py_array_timsort
def array_factory(self, lst):
return np.array(lst, dtype=np.int32)
class JITTimsortMixin(object):
timsort = jit_array_timsort
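    # Disable the inherited merge-stack tests for the JIT variant by overriding
    # them with None; they drive MergeState internals directly.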
test_merge_at = None
test_merge_force_collapse = None
def wrap_with_mergestate(self, timsort, func, _cache={}):
"""
Wrap *func* into another compiled function inserting a runtime-created
mergestate as the first function argument.
"""
key = timsort, func
if key in _cache:
return _cache[key]
merge_init = timsort.merge_init
@timsort.compile
def wrapper(keys, values, *args):
ms = merge_init(keys)
res = func(ms, keys, values, *args)
return res
_cache[key] = wrapper
return wrapper
class TestTimsortArrays(JITTimsortMixin, BaseTimsortTest, TestCase):
def array_factory(self, lst):
return np.array(lst, dtype=np.int32)
def check_merge_lo_hi(self, func, a, b):
na = len(a)
nb = len(b)
func = self.wrap_with_mergestate(self.timsort, func)
# Add sentinels at start and end, to check they weren't moved
orig_keys = [42] + a + b + [-42]
keys = self.array_factory(orig_keys)
ssa = 1
ssb = ssa + na
new_ms = func(keys, keys, ssa, na, ssb, nb)
self.assertEqual(keys[0], orig_keys[0])
self.assertEqual(keys[-1], orig_keys[-1])
self.assertSorted(orig_keys[1:-1], keys[1:-1])
class BaseQuicksortTest(BaseSortingTest):
# NOTE these tests assume a non-argsort quicksort.
def test_insertion_sort(self):
n = 20
def check(l, n):
res = self.array_factory([9999] + l + [-9999])
f(res, res, 1, n)
self.assertEqual(res[0], 9999)
self.assertEqual(res[-1], -9999)
self.assertSorted(l, res[1:-1])
f = self.quicksort.insertion_sort
l = self.sorted_list(n)
check(l, n)
l = self.revsorted_list(n)
check(l, n)
l = self.initially_sorted_list(n, n//2)
check(l, n)
l = self.revsorted_list(n)
check(l, n)
l = self.random_list(n)
check(l, n)
l = self.duprandom_list(n)
check(l, n)
def test_partition(self):
n = 20
def check(l, n):
res = self.array_factory([9999] + l + [-9999])
index = f(res, res, 1, n)
self.assertEqual(res[0], 9999)
self.assertEqual(res[-1], -9999)
pivot = res[index]
for i in range(1, index):
self.assertLessEqual(res[i], pivot)
for i in range(index + 1, n):
self.assertGreaterEqual(res[i], pivot)
f = self.quicksort.partition
l = self.sorted_list(n)
check(l, n)
l = self.revsorted_list(n)
check(l, n)
l = self.initially_sorted_list(n, n//2)
check(l, n)
l = self.revsorted_list(n)
check(l, n)
l = self.random_list(n)
check(l, n)
l = self.duprandom_list(n)
check(l, n)
def test_partition3(self):
# Test the unused partition3() function
n = 20
def check(l, n):
res = self.array_factory([9999] + l + [-9999])
lt, gt = f(res, 1, n)
self.assertEqual(res[0], 9999)
self.assertEqual(res[-1], -9999)
pivot = res[lt]
for i in range(1, lt):
self.assertLessEqual(res[i], pivot)
for i in range(lt, gt + 1):
self.assertEqual(res[i], pivot)
for i in range(gt + 1, n):
self.assertGreater(res[i], pivot)
f = self.quicksort.partition3
l = self.sorted_list(n)
check(l, n)
l = self.revsorted_list(n)
check(l, n)
l = self.initially_sorted_list(n, n//2)
check(l, n)
l = self.revsorted_list(n)
check(l, n)
l = self.random_list(n)
check(l, n)
l = self.duprandom_list(n)
check(l, n)
def test_run_quicksort(self):
f = self.quicksort.run_quicksort
for size_factor in (1, 5):
# Make lists to be sorted from two chunks of different kinds.
sizes = (15, 20)
all_lists = [self.make_sample_lists(n * size_factor) for n in sizes]
for chunks in itertools.product(*all_lists):
orig_keys = sum(chunks, [])
keys = self.array_factory(orig_keys)
f(keys)
# The list is now sorted
self.assertSorted(orig_keys, keys)
def test_run_quicksort_lt(self):
def lt(a, b):
return a > b
f = self.make_quicksort(lt=lt).run_quicksort
for size_factor in (1, 5):
# Make lists to be sorted from two chunks of different kinds.
sizes = (15, 20)
all_lists = [self.make_sample_lists(n * size_factor) for n in sizes]
for chunks in itertools.product(*all_lists):
orig_keys = sum(chunks, [])
keys = self.array_factory(orig_keys)
f(keys)
# The list is now rev-sorted
self.assertSorted(orig_keys, keys[::-1])
# An imperfect comparison function, as LT(a, b) does not imply not LT(b, a).
# The sort should handle it gracefully.
def lt_floats(a, b):
return math.isnan(b) or a < b
f = self.make_quicksort(lt=lt_floats).run_quicksort
np.random.seed(42)
for size in (5, 20, 50, 500):
orig = np.random.random(size=size) * 100
orig[np.random.random(size=size) < 0.1] = float('nan')
orig_keys = list(orig)
keys = self.array_factory(orig_keys)
f(keys)
non_nans = orig[~np.isnan(orig)]
# Non-NaNs are sorted at the front
self.assertSorted(non_nans, keys[:len(non_nans)])
class TestQuicksortPurePython(BaseQuicksortTest, TestCase):
quicksort = py_quicksort
make_quicksort = staticmethod(make_py_quicksort)
# Much faster than a Numpy array in pure Python
array_factory = list
class TestQuicksortArrays(BaseQuicksortTest, TestCase):
quicksort = jit_quicksort
make_quicksort = staticmethod(make_jit_quicksort)
def array_factory(self, lst):
return np.array(lst, dtype=np.float64)
class TestNumpySort(TestCase):
def setUp(self):
np.random.seed(42)
def int_arrays(self):
for size in (5, 20, 50, 500):
yield np.random.randint(99, size=size)
def float_arrays(self):
for size in (5, 20, 50, 500):
yield np.random.random(size=size) * 100
# Now with NaNs. Numpy sorts them at the end.
for size in (5, 20, 50, 500):
orig = np.random.random(size=size) * 100
orig[np.random.random(size=size) < 0.1] = float('nan')
yield orig
def has_duplicates(self, arr):
"""
Whether the array has duplicates. Takes NaNs into account.
"""
if np.count_nonzero(np.isnan(arr)) > 1:
return True
if np.unique(arr).size < arr.size:
return True
return False
def check_sort_inplace(self, pyfunc, cfunc, val):
expected = copy.copy(val)
got = copy.copy(val)
pyfunc(expected)
cfunc(got)
self.assertPreciseEqual(got, expected)
def check_sort_copy(self, pyfunc, cfunc, val):
orig = copy.copy(val)
expected = pyfunc(val)
got = cfunc(val)
self.assertPreciseEqual(got, expected)
# The original wasn't mutated
self.assertPreciseEqual(val, orig)
def check_argsort(self, pyfunc, cfunc, val, kwargs={}):
orig = copy.copy(val)
expected = pyfunc(val, **kwargs)
got = cfunc(val, **kwargs)
self.assertPreciseEqual(orig[got], np.sort(orig),
msg="the array wasn't argsorted")
# Numba and Numpy results may differ if there are duplicates
# in the array
if not self.has_duplicates(orig):
self.assertPreciseEqual(got, expected)
# The original wasn't mutated
self.assertPreciseEqual(val, orig)
def test_array_sort_int(self):
pyfunc = sort_usecase
cfunc = jit(nopython=True)(pyfunc)
for orig in self.int_arrays():
self.check_sort_inplace(pyfunc, cfunc, orig)
def test_array_sort_float(self):
pyfunc = sort_usecase
cfunc = jit(nopython=True)(pyfunc)
for orig in self.float_arrays():
self.check_sort_inplace(pyfunc, cfunc, orig)
def test_np_sort_int(self):
pyfunc = np_sort_usecase
cfunc = jit(nopython=True)(pyfunc)
for orig in self.int_arrays():
self.check_sort_copy(pyfunc, cfunc, orig)
def test_np_sort_float(self):
pyfunc = np_sort_usecase
cfunc = jit(nopython=True)(pyfunc)
for size in (5, 20, 50, 500):
orig = np.random.random(size=size) * 100
orig[np.random.random(size=size) < 0.1] = float('nan')
self.check_sort_copy(pyfunc, cfunc, orig)
def test_argsort_int(self):
def check(pyfunc):
cfunc = jit(nopython=True)(pyfunc)
for orig in self.int_arrays():
self.check_argsort(pyfunc, cfunc, orig)
check(argsort_usecase)
check(np_argsort_usecase)
def test_argsort_kind_int(self):
def check(pyfunc, is_stable):
cfunc = jit(nopython=True)(pyfunc)
for orig in self.int_arrays():
self.check_argsort(pyfunc, cfunc, orig,
dict(is_stable=is_stable))
check(argsort_kind_usecase, is_stable=True)
check(np_argsort_kind_usecase, is_stable=True)
check(argsort_kind_usecase, is_stable=False)
check(np_argsort_kind_usecase, is_stable=False)
def test_argsort_float(self):
def check(pyfunc):
cfunc = jit(nopython=True)(pyfunc)
for orig in self.float_arrays():
self.check_argsort(pyfunc, cfunc, orig)
check(argsort_usecase)
check(np_argsort_usecase)
    def test_argsort_kind_float(self):
def check(pyfunc, is_stable):
cfunc = jit(nopython=True)(pyfunc)
for orig in self.float_arrays():
self.check_argsort(pyfunc, cfunc, orig,
dict(is_stable=is_stable))
check(argsort_kind_usecase, is_stable=True)
check(np_argsort_kind_usecase, is_stable=True)
check(argsort_kind_usecase, is_stable=False)
check(np_argsort_kind_usecase, is_stable=False)
class TestPythonSort(TestCase):
def test_list_sort(self):
pyfunc = list_sort_usecase
cfunc = jit(nopython=True)(pyfunc)
for size in (20, 50, 500):
orig, ret = cfunc(size)
self.assertEqual(sorted(orig), ret)
self.assertNotEqual(orig, ret) # sanity check
def test_list_sort_reverse(self):
pyfunc = list_sort_reverse_usecase
cfunc = jit(nopython=True)(pyfunc)
for size in (20, 50, 500):
for b in (False, True):
orig, ret = cfunc(size, b)
self.assertEqual(sorted(orig, reverse=b), ret)
self.assertNotEqual(orig, ret) # sanity check
def test_sorted(self):
pyfunc = sorted_usecase
cfunc = jit(nopython=True)(pyfunc)
for size in (20, 50, 500):
orig = np.random.random(size=size) * 100
expected = sorted(orig)
got = cfunc(orig)
self.assertPreciseEqual(got, expected)
self.assertNotEqual(list(orig), got) # sanity check
def test_sorted_reverse(self):
pyfunc = sorted_reverse_usecase
cfunc = jit(nopython=True)(pyfunc)
size = 20
orig = np.random.random(size=size) * 100
for b in (False, True):
expected = sorted(orig, reverse=b)
got = cfunc(orig, b)
self.assertPreciseEqual(got, expected)
self.assertNotEqual(list(orig), got) # sanity check
class TestMergeSort(TestCase):
def setUp(self):
np.random.seed(321)
def check_argsort_stable(self, sorter, low, high, count):
# make data with high possibility of duplicated key
data = np.random.randint(low, high, count)
expect = np.argsort(data, kind='mergesort')
got = sorter(data)
np.testing.assert_equal(expect, got)
def test_argsort_stable(self):
arglist = [
(-2, 2, 5),
(-5, 5, 10),
(0, 10, 101),
(0, 100, 1003),
]
imp = make_jit_mergesort(is_argsort=True)
toplevel = imp.run_mergesort
sorter = njit(lambda arr: toplevel(arr))
for args in arglist:
self.check_argsort_stable(sorter, *args)
nop_compiler = lambda x:x
class TestSortSlashSortedWithKey(MemoryLeakMixin, TestCase):
def test_01(self):
a = [3, 1, 4, 1, 5, 9]
@njit
def external_key(z):
return 1. / z
@njit
def foo(x, key=None):
new_x = x[:]
new_x.sort(key=key)
return sorted(x[:], key=key), new_x
self.assertPreciseEqual(foo(a[:]), foo.py_func(a[:]))
self.assertPreciseEqual(foo(a[:], external_key),
foo.py_func(a[:], external_key))
def test_02(self):
a = [3, 1, 4, 1, 5, 9]
@njit
def foo(x):
def closure_key(z):
return 1. / z
new_x = x[:]
new_x.sort(key=closure_key)
return sorted(x[:], key=closure_key), new_x
self.assertPreciseEqual(foo(a[:]), foo.py_func(a[:]))
def test_03(self):
a = [3, 1, 4, 1, 5, 9]
def gen(compiler):
@compiler
def bar(x, func):
new_x = x[:]
new_x.sort(key=func)
return sorted(x[:], key=func), new_x
@compiler
def foo(x):
def closure_escapee_key(z):
return 1. / z
return bar(x, closure_escapee_key)
return foo
self.assertPreciseEqual(gen(njit)(a[:]), gen(nop_compiler)(a[:]))
def test_04(self):
a = ['a','b','B','b','C','A']
@njit
def external_key(z):
return z.upper()
@njit
def foo(x, key=None):
new_x = x[:]
new_x.sort(key=key)
return sorted(x[:], key=key), new_x
self.assertPreciseEqual(foo(a[:]), foo.py_func(a[:]))
self.assertPreciseEqual(foo(a[:], external_key),
foo.py_func(a[:], external_key))
def test_05(self):
a = ['a','b','B','b','C','A']
@njit
def external_key(z):
return z.upper()
@njit
def foo(x, key=None, reverse=False):
new_x = x[:]
new_x.sort(key=key, reverse=reverse)
return (sorted(x[:], key=key, reverse=reverse), new_x)
for key, rev in itertools.product((None, external_key),
(True, False, 1, -12, 0)):
self.assertPreciseEqual(foo(a[:], key, rev),
foo.py_func(a[:], key, rev))
def test_optional_on_key(self):
a = [3, 1, 4, 1, 5, 9]
@njit
def foo(x, predicate):
if predicate:
def closure_key(z):
return 1. / z
else:
closure_key = None
new_x = x[:]
new_x.sort(key=closure_key)
return (sorted(x[:], key=closure_key), new_x)
with self.assertRaises(errors.TypingError) as raises:
TF = True
foo(a[:], TF)
msg = "Key must concretely be None or a Numba JIT compiled function"
self.assertIn(msg, str(raises.exception))
def test_exceptions_sorted(self):
@njit
def foo_sorted(x, key=None, reverse=False):
return sorted(x[:], key=key, reverse=reverse)
@njit
def foo_sort(x, key=None, reverse=False):
new_x = x[:]
new_x.sort(key=key, reverse=reverse)
return new_x
@njit
def external_key(z):
return 1. / z
a = [3, 1, 4, 1, 5, 9]
for impl in (foo_sort, foo_sorted):
# check illegal key
with self.assertRaises(errors.TypingError) as raises:
impl(a, key="illegal")
expect = "Key must be None or a Numba JIT compiled function"
self.assertIn(expect, str(raises.exception))
# check illegal reverse
with self.assertRaises(errors.TypingError) as raises:
impl(a, key=external_key, reverse="go backwards")
expect = "an integer is required for 'reverse'"
self.assertIn(expect, str(raises.exception))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | 8,587,712,500,323,978,000 | 30.540037 | 84 | 0.537551 | false |
FireballDWF/cloud-custodian | tools/c7n_salactus/setup.py | 5 | 1310 | # Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
import os
description = ""
if os.path.exists('README.md'):
description = open('README.md').read()
setup(
name="c7n_salactus",
version='0.3.0',
description="Cloud Custodian - Salactus S3",
long_description=description,
classifiers=[
"Topic :: System :: Systems Administration",
"Topic :: System :: Distributed Computing"
],
url="https://github.com/cloud-custodian/cloud-custodian",
author="Kapil Thangavelu",
license="Apache-2.0",
packages=find_packages(),
entry_points={
'console_scripts': [
'c7n-salactus = c7n_salactus.cli:cli']},
install_requires=["c7n", "click", "rq", "redis"],
)
| apache-2.0 | -8,154,533,179,692,813,000 | 32.589744 | 74 | 0.69313 | false |
trungnt13/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 47 | 8095 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause | 992,158,262,290,243,100 | 36.304147 | 79 | 0.633354 | false |
SaschaMester/delicium | third_party/mojo/src/mojo/public/tools/bindings/generators/mojom_dart_generator.py | 1 | 17908 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates dart source files from a mojom.Module."""
import os
import re
import shutil
import sys
import mojom.generate.constant_resolver as resolver
import mojom.generate.generator as generator
import mojom.generate.module as mojom
import mojom.generate.pack as pack
from mojom.generate.template_expander import UseJinja
GENERATOR_PREFIX = 'dart'
_kind_to_dart_default_value = {
mojom.BOOL: "false",
mojom.INT8: "0",
mojom.UINT8: "0",
mojom.INT16: "0",
mojom.UINT16: "0",
mojom.INT32: "0",
mojom.UINT32: "0",
mojom.FLOAT: "0.0",
mojom.HANDLE: "null",
mojom.DCPIPE: "null",
mojom.DPPIPE: "null",
mojom.MSGPIPE: "null",
mojom.SHAREDBUFFER: "null",
mojom.NULLABLE_HANDLE: "null",
mojom.NULLABLE_DCPIPE: "null",
mojom.NULLABLE_DPPIPE: "null",
mojom.NULLABLE_MSGPIPE: "null",
mojom.NULLABLE_SHAREDBUFFER: "null",
mojom.INT64: "0",
mojom.UINT64: "0",
mojom.DOUBLE: "0.0",
mojom.STRING: "null",
mojom.NULLABLE_STRING: "null"
}
_kind_to_dart_decl_type = {
mojom.BOOL: "bool",
mojom.INT8: "int",
mojom.UINT8: "int",
mojom.INT16: "int",
mojom.UINT16: "int",
mojom.INT32: "int",
mojom.UINT32: "int",
mojom.FLOAT: "double",
mojom.HANDLE: "core.MojoHandle",
mojom.DCPIPE: "core.MojoDataPipeConsumer",
mojom.DPPIPE: "core.MojoDataPipeProducer",
mojom.MSGPIPE: "core.MojoMessagePipeEndpoint",
mojom.SHAREDBUFFER: "core.MojoSharedBuffer",
mojom.NULLABLE_HANDLE: "core.MojoHandle",
mojom.NULLABLE_DCPIPE: "core.MojoDataPipeConsumer",
mojom.NULLABLE_DPPIPE: "core.MojoDataPipeProducer",
mojom.NULLABLE_MSGPIPE: "core.MojoMessagePipeEndpoint",
mojom.NULLABLE_SHAREDBUFFER: "core.MojoSharedBuffer",
mojom.INT64: "int",
mojom.UINT64: "int",
mojom.DOUBLE: "double",
mojom.STRING: "String",
mojom.NULLABLE_STRING: "String"
}
_spec_to_decode_method = {
mojom.BOOL.spec: 'decodeBool',
mojom.DCPIPE.spec: 'decodeConsumerHandle',
mojom.DOUBLE.spec: 'decodeDouble',
mojom.DPPIPE.spec: 'decodeProducerHandle',
mojom.FLOAT.spec: 'decodeFloat',
mojom.HANDLE.spec: 'decodeHandle',
mojom.INT16.spec: 'decodeInt16',
mojom.INT32.spec: 'decodeInt32',
mojom.INT64.spec: 'decodeInt64',
mojom.INT8.spec: 'decodeInt8',
mojom.MSGPIPE.spec: 'decodeMessagePipeHandle',
mojom.NULLABLE_DCPIPE.spec: 'decodeConsumerHandle',
mojom.NULLABLE_DPPIPE.spec: 'decodeProducerHandle',
mojom.NULLABLE_HANDLE.spec: 'decodeHandle',
mojom.NULLABLE_MSGPIPE.spec: 'decodeMessagePipeHandle',
mojom.NULLABLE_SHAREDBUFFER.spec: 'decodeSharedBufferHandle',
mojom.NULLABLE_STRING.spec: 'decodeString',
mojom.SHAREDBUFFER.spec: 'decodeSharedBufferHandle',
mojom.STRING.spec: 'decodeString',
mojom.UINT16.spec: 'decodeUint16',
mojom.UINT32.spec: 'decodeUint32',
mojom.UINT64.spec: 'decodeUint64',
mojom.UINT8.spec: 'decodeUint8',
}
_spec_to_encode_method = {
mojom.BOOL.spec: 'encodeBool',
mojom.DCPIPE.spec: 'encodeConsumerHandle',
mojom.DOUBLE.spec: 'encodeDouble',
mojom.DPPIPE.spec: 'encodeProducerHandle',
mojom.FLOAT.spec: 'encodeFloat',
mojom.HANDLE.spec: 'encodeHandle',
mojom.INT16.spec: 'encodeInt16',
mojom.INT32.spec: 'encodeInt32',
mojom.INT64.spec: 'encodeInt64',
mojom.INT8.spec: 'encodeInt8',
mojom.MSGPIPE.spec: 'encodeMessagePipeHandle',
mojom.NULLABLE_DCPIPE.spec: 'encodeConsumerHandle',
mojom.NULLABLE_DPPIPE.spec: 'encodeProducerHandle',
mojom.NULLABLE_HANDLE.spec: 'encodeHandle',
mojom.NULLABLE_MSGPIPE.spec: 'encodeMessagePipeHandle',
mojom.NULLABLE_SHAREDBUFFER.spec: 'encodeSharedBufferHandle',
mojom.NULLABLE_STRING.spec: 'encodeString',
mojom.SHAREDBUFFER.spec: 'encodeSharedBufferHandle',
mojom.STRING.spec: 'encodeString',
mojom.UINT16.spec: 'encodeUint16',
mojom.UINT32.spec: 'encodeUint32',
mojom.UINT64.spec: 'encodeUint64',
mojom.UINT8.spec: 'encodeUint8',
}
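# The two tables above map mojom primitive type specs to the decode/encode
# method names of the generated Dart bindings codec.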
def GetDartType(kind):
if kind.imported_from:
return kind.imported_from["unique_name"] + "." + GetNameForElement(kind)
return GetNameForElement(kind)
def DartDefaultValue(field):
if field.default:
if mojom.IsStructKind(field.kind):
assert field.default == "default"
return "new %s()" % GetDartType(field.kind)
return ExpressionToText(field.default)
if field.kind in mojom.PRIMITIVES:
return _kind_to_dart_default_value[field.kind]
if mojom.IsStructKind(field.kind):
return "null"
if mojom.IsUnionKind(field.kind):
return "null"
if mojom.IsArrayKind(field.kind):
return "null"
if mojom.IsMapKind(field.kind):
return "null"
if mojom.IsInterfaceKind(field.kind) or \
mojom.IsInterfaceRequestKind(field.kind):
return "null"
if mojom.IsEnumKind(field.kind):
return "0"
def DartDeclType(kind):
if kind in mojom.PRIMITIVES:
return _kind_to_dart_decl_type[kind]
if mojom.IsStructKind(kind):
return GetDartType(kind)
if mojom.IsUnionKind(kind):
return "%sWrapper" % GetDartType(kind)
if mojom.IsArrayKind(kind):
array_type = DartDeclType(kind.kind)
return "List<" + array_type + ">"
if mojom.IsMapKind(kind):
key_type = DartDeclType(kind.key_kind)
value_type = DartDeclType(kind.value_kind)
return "Map<"+ key_type + ", " + value_type + ">"
if mojom.IsInterfaceKind(kind) or \
mojom.IsInterfaceRequestKind(kind):
return "Object"
if mojom.IsEnumKind(kind):
return "int"
def NameToComponent(name):
# insert '_' between anything and a Title name (e.g, HTTPEntry2FooBar ->
# HTTP_Entry2_FooBar)
name = re.sub('([^_])([A-Z][^A-Z_]+)', r'\1_\2', name)
# insert '_' between non upper and start of upper blocks (e.g.,
# HTTP_Entry2_FooBar -> HTTP_Entry2_Foo_Bar)
name = re.sub('([^A-Z_])([A-Z])', r'\1_\2', name)
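  # e.g. "HTTPEntry2FooBar" ends up as ["http", "entry2", "foo", "bar"]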
return [x.lower() for x in name.split('_')]
def UpperCamelCase(name):
return ''.join([x.capitalize() for x in NameToComponent(name)])
def CamelCase(name):
uccc = UpperCamelCase(name)
return uccc[0].lower() + uccc[1:]
def ConstantStyle(name):
components = NameToComponent(name)
if components[0] == 'k' and len(components) > 1:
components = components[1:]
  # a variable name cannot start with a digit.
if components[0][0].isdigit():
components[0] = '_' + components[0]
return '_'.join([x.upper() for x in components])
def DotToUnderscore(name):
return name.replace('.', '_')
def GetNameForElement(element):
if (mojom.IsEnumKind(element) or mojom.IsInterfaceKind(element) or
mojom.IsStructKind(element) or mojom.IsUnionKind(element)):
return UpperCamelCase(element.name)
if mojom.IsInterfaceRequestKind(element):
return GetNameForElement(element.kind)
if isinstance(element, (mojom.Method,
mojom.Parameter,
mojom.StructField)):
return CamelCase(element.name)
if isinstance(element, mojom.UnionField):
return "f%s" % UpperCamelCase(element.name)
if isinstance(element, mojom.EnumValue):
return (GetNameForElement(element.enum) + '.' +
ConstantStyle(element.name))
if isinstance(element, (mojom.NamedValue,
mojom.Constant,
mojom.EnumField)):
return ConstantStyle(element.name)
raise Exception('Unexpected element: %s' % element)
def GetUnionFieldTagName(element):
if not isinstance(element, mojom.UnionField):
raise Exception('Unexpected element: %s is not a union field.' % element)
return 'tag%s' % UpperCamelCase(element.name)
def GetInterfaceResponseName(method):
return UpperCamelCase(method.name + 'Response')
def GetDartTrueFalse(value):
return 'true' if value else 'false'
def GetArrayNullabilityFlags(kind):
"""Returns nullability flags for an array type, see codec.dart.
As we have dedicated decoding functions for arrays, we have to pass
nullability information about both the array itself, as well as the array
element type there.
"""
assert mojom.IsArrayKind(kind)
ARRAY_NULLABLE = 'bindings.kArrayNullable'
ELEMENT_NULLABLE = 'bindings.kElementNullable'
NOTHING_NULLABLE = 'bindings.kNothingNullable'
flags_to_set = []
if mojom.IsNullableKind(kind):
flags_to_set.append(ARRAY_NULLABLE)
if mojom.IsNullableKind(kind.kind):
flags_to_set.append(ELEMENT_NULLABLE)
if not flags_to_set:
flags_to_set = [NOTHING_NULLABLE]
return ' | '.join(flags_to_set)
def AppendDecodeParams(initial_params, kind, bit):
""" Appends standard parameters for decode calls. """
params = list(initial_params)
if (kind == mojom.BOOL):
params.append(str(bit))
if mojom.IsReferenceKind(kind):
if mojom.IsArrayKind(kind):
params.append(GetArrayNullabilityFlags(kind))
else:
params.append(GetDartTrueFalse(mojom.IsNullableKind(kind)))
if mojom.IsInterfaceKind(kind):
params.append('%sProxy.newFromEndpoint' % GetDartType(kind))
if mojom.IsArrayKind(kind) and mojom.IsInterfaceKind(kind.kind):
params.append('%sProxy.newFromEndpoint' % GetDartType(kind.kind))
if mojom.IsInterfaceRequestKind(kind):
params.append('%sStub.newFromEndpoint' % GetDartType(kind.kind))
if mojom.IsArrayKind(kind) and mojom.IsInterfaceRequestKind(kind.kind):
params.append('%sStub.newFromEndpoint' % GetDartType(kind.kind.kind))
if mojom.IsArrayKind(kind):
params.append(GetArrayExpectedLength(kind))
return params
def AppendEncodeParams(initial_params, kind, bit):
""" Appends standard parameters shared between encode and decode calls. """
params = list(initial_params)
if (kind == mojom.BOOL):
params.append(str(bit))
if mojom.IsReferenceKind(kind):
if mojom.IsArrayKind(kind):
params.append(GetArrayNullabilityFlags(kind))
else:
params.append(GetDartTrueFalse(mojom.IsNullableKind(kind)))
if mojom.IsArrayKind(kind):
params.append(GetArrayExpectedLength(kind))
return params
def DecodeMethod(kind, offset, bit):
def _DecodeMethodName(kind):
if mojom.IsArrayKind(kind):
return _DecodeMethodName(kind.kind) + 'Array'
if mojom.IsEnumKind(kind):
return _DecodeMethodName(mojom.INT32)
if mojom.IsInterfaceRequestKind(kind):
return 'decodeInterfaceRequest'
if mojom.IsInterfaceKind(kind):
return 'decodeServiceInterface'
return _spec_to_decode_method[kind.spec]
methodName = _DecodeMethodName(kind)
params = AppendDecodeParams([ str(offset) ], kind, bit)
return '%s(%s)' % (methodName, ', '.join(params))
def EncodeMethod(kind, variable, offset, bit):
def _EncodeMethodName(kind):
if mojom.IsStructKind(kind):
return 'encodeStruct'
if mojom.IsUnionKind(kind):
return 'encodeUnion'
if mojom.IsArrayKind(kind):
return _EncodeMethodName(kind.kind) + 'Array'
if mojom.IsEnumKind(kind):
return _EncodeMethodName(mojom.INT32)
if mojom.IsInterfaceRequestKind(kind):
return 'encodeInterfaceRequest'
if mojom.IsInterfaceKind(kind):
return 'encodeInterface'
return _spec_to_encode_method[kind.spec]
methodName = _EncodeMethodName(kind)
params = AppendEncodeParams([ variable, str(offset) ], kind, bit)
return '%s(%s)' % (methodName, ', '.join(params))
def TranslateConstants(token):
if isinstance(token, (mojom.EnumValue, mojom.NamedValue)):
# Both variable and enum constants are constructed like:
# NamespaceUid.Struct.Enum_CONSTANT_NAME
name = ""
if token.imported_from:
name = token.imported_from["unique_name"] + "."
if token.parent_kind:
name = name + token.parent_kind.name + "."
if isinstance(token, mojom.EnumValue):
name = name + token.enum.name + "_"
return name + token.name
if isinstance(token, mojom.BuiltinValue):
if token.value == "double.INFINITY" or token.value == "float.INFINITY":
return "double.INFINITY";
if token.value == "double.NEGATIVE_INFINITY" or \
token.value == "float.NEGATIVE_INFINITY":
return "double.NEGATIVE_INFINITY";
if token.value == "double.NAN" or token.value == "float.NAN":
return "double.NAN";
# Strip leading '+'.
if token[0] == '+':
token = token[1:]
return token
def ExpressionToText(token):
if isinstance(token, (mojom.EnumValue, mojom.NamedValue)):
return str(token.resolved_value)
return TranslateConstants(token)
def GetArrayKind(kind, size = None):
if size is None:
return mojom.Array(kind)
else:
array = mojom.Array(kind, 0)
array.dart_map_size = size
return array
def GetArrayExpectedLength(kind):
if mojom.IsArrayKind(kind) and kind.length is not None:
return getattr(kind, 'dart_map_size', str(kind.length))
else:
return 'bindings.kUnspecifiedArrayLength'
def IsPointerArrayKind(kind):
if not mojom.IsArrayKind(kind):
return False
sub_kind = kind.kind
return mojom.IsObjectKind(sub_kind)
def GetImportUri(module):
elements = module.namespace.split('.')
elements.append("%s" % module.name)
return os.path.join("mojom", *elements)
class Generator(generator.Generator):
dart_filters = {
'array_expected_length': GetArrayExpectedLength,
'array': GetArrayKind,
'decode_method': DecodeMethod,
'default_value': DartDefaultValue,
'encode_method': EncodeMethod,
'expression_to_text': ExpressionToText,
'is_map_kind': mojom.IsMapKind,
'is_nullable_kind': mojom.IsNullableKind,
'is_pointer_array_kind': IsPointerArrayKind,
'is_struct_kind': mojom.IsStructKind,
'is_union_kind': mojom.IsUnionKind,
'dart_true_false': GetDartTrueFalse,
'dart_type': DartDeclType,
'name': GetNameForElement,
'tag_name': GetUnionFieldTagName,
'interface_response_name': GetInterfaceResponseName,
'dot_to_underscore': DotToUnderscore,
}
def GetParameters(self, args):
return {
"namespace": self.module.namespace,
"imports": self.GetImports(args),
"kinds": self.module.kinds,
"enums": self.module.enums,
"module": resolver.ResolveConstants(self.module, ExpressionToText),
"structs": self.GetStructs() + self.GetStructsFromMethods(),
"unions": self.GetUnions(),
"interfaces": self.GetInterfaces(),
"imported_interfaces": self.GetImportedInterfaces(),
"imported_from": self.ImportedFrom(),
}
@UseJinja("dart_templates/module.lib.tmpl", filters=dart_filters)
def GenerateLibModule(self, args):
return self.GetParameters(args)
def GenerateFiles(self, args):
elements = self.module.namespace.split('.')
elements.append("%s.dart" % self.module.name)
path = os.path.join("dart-pkg", "mojom/lib", *elements)
self.Write(self.GenerateLibModule(args), path)
path = os.path.join("dart-gen", "mojom/lib", *elements)
self.Write(self.GenerateLibModule(args), path)
link = self.MatchMojomFilePath("%s.dart" % self.module.name)
if os.path.exists(os.path.join(self.output_dir, link)):
os.unlink(os.path.join(self.output_dir, link))
try:
if sys.platform == "win32":
shutil.copy(os.path.join(self.output_dir, path),
os.path.join(self.output_dir, link))
else:
os.symlink(os.path.join(self.output_dir, path),
os.path.join(self.output_dir, link))
except OSError as e:
# Errno 17 is file already exists. If the link fails because file already
# exists assume another instance of this script tried to create the same
# file and continue on.
if e.errno != 17:
raise e
def GetImports(self, args):
used_names = set()
for each_import in self.module.imports:
simple_name = each_import["module_name"].split(".")[0]
# Since each import is assigned a library in Dart, they need to have
# unique names.
unique_name = simple_name
counter = 0
while unique_name in used_names:
counter += 1
unique_name = simple_name + str(counter)
used_names.add(unique_name)
each_import["unique_name"] = unique_name + '_mojom'
counter += 1
each_import["rebased_path"] = GetImportUri(each_import['module'])
return self.module.imports
def GetImportedInterfaces(self):
interface_to_import = {}
for each_import in self.module.imports:
for each_interface in each_import["module"].interfaces:
name = each_interface.name
interface_to_import[name] = each_import["unique_name"] + "." + name
return interface_to_import
def ImportedFrom(self):
interface_to_import = {}
for each_import in self.module.imports:
for each_interface in each_import["module"].interfaces:
name = each_interface.name
interface_to_import[name] = each_import["unique_name"] + "."
return interface_to_import
| bsd-3-clause | -3,463,575,132,767,874,000 | 36.230769 | 79 | 0.651385 | false |
thinkopensolutions/odoo-saas-tools | saas_server_backup_ftp/models/saas_server.py | 1 | 2303 | # -*- coding: utf-8 -*-
import tempfile
from odoo import api, models
import logging
_logger = logging.getLogger(__name__)
try:
import pysftp
except ImportError:
_logger.debug('saas_server_backup_ftp requires the python library pysftp which is not found on your installation')
class SaasServerClient(models.Model):
_inherit = 'saas_server.client'
@api.model
def _transport_backup(self, dump_db, filename=None):
server = self.env['ir.config_parameter'].get_param('saas_server.sftp_server', None)
username = self.env['ir.config_parameter'].get_param('saas_server.sftp_username', None)
password = self.env['ir.config_parameter'].get_param('saas_server.sftp_password', None)
path = self.env['ir.config_parameter'].get_param('saas_server.sftp_path', None)
sftp_rsa_key_path = self.env['ir.config_parameter'].get_param(
'saas_server.sftp_rsa_key_path', None)
if sftp_rsa_key_path:
srv = pysftp.Connection(host=server, username=username,
private_key=sftp_rsa_key_path,
private_key_pass=password)
else:
srv = pysftp.Connection(host=server, username=username,
password=password)
# set keepalive to prevent socket closed / connection dropped error
srv._transport.set_keepalive(30)
try:
srv.chdir(path)
except IOError:
# Create directory and subdirs if they do not exist.
currentDir = ''
for dirElement in path.split('/'):
currentDir += dirElement + '/'
try:
srv.chdir(currentDir)
except:
print('(Part of the) path didn\'t exist. Creating it now at ' + currentDir)
# Make directory and then navigate into it
srv.mkdir(currentDir, mode=777)
srv.chdir(currentDir)
srv.chdir(path)
with tempfile.TemporaryFile() as t:
dump_db(t)
t.seek(0)
srv.putfo(t, filename)
srv.close()
@api.model
def schedule_saas_databases_backup(self):
self.search([('state', '!=', 'deleted')]).backup_database()
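# Example configuration (illustrative values only): the SFTP transport above
# reads ir.config_parameter keys such as saas_server.sftp_server
# ('backup.example.com'), saas_server.sftp_username, saas_server.sftp_password
# and saas_server.sftp_path ('/backups/odoo'); setting
# saas_server.sftp_rsa_key_path switches the connection to key-based
# authentication.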
| lgpl-3.0 | -2,214,371,601,603,189,500 | 38.033898 | 118 | 0.577073 | false |
uclouvain/osis | base/tests/utils/test_operator.py | 1 | 2143 | # ############################################################################
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
# ############################################################################
from django.test import SimpleTestCase
from base.utils import operator
class TestIsYearLower(SimpleTestCase):
def test_should_return_false_when_base_year_is_none(self):
self.assertFalse(
operator.is_year_lower(None, 2025)
)
def test_should_return_true_when_year_to_compare_to_is_none(self):
self.assertTrue(
operator.is_year_lower(2029, None)
)
def test_should_return_true_when_base_year_is_inferior_to_other_year(self):
self.assertTrue(
operator.is_year_lower(2017, 2029)
)
def test_should_return_false_when_base_year_is_equal_to_other_year(self):
self.assertFalse(
operator.is_year_lower(2017, 2017)
)
def test_should_return_false_when_base_year_is_greater_to_other_year(self):
self.assertFalse(
operator.is_year_lower(2019, 2017)
)
| agpl-3.0 | -7,646,645,716,573,557,000 | 39.415094 | 85 | 0.654062 | false |
NewpTone/stacklab-nova | debian/tmp/usr/lib/python2.7/dist-packages/nova/tests/test_configdrive2.py | 7 | 3462 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
import os
import tempfile
from nova import test
from nova import flags
from nova.openstack.common import log
from nova import utils
from nova.virt import configdrive
from nova.virt.libvirt import utils as virtutils
FLAGS = flags.FLAGS
LOG = log.getLogger(__name__)
class ConfigDriveTestCase(test.TestCase):
def test_create_configdrive_iso(self):
imagefile = None
try:
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
'-allow-lowercase', '-allow-multidot', '-l',
'-publisher', mox.IgnoreArg(), '-quiet', '-J', '-r',
'-V', 'config-2', mox.IgnoreArg(), attempts=1,
run_as_root=False).AndReturn(None)
self.mox.ReplayAll()
c = configdrive.ConfigDriveBuilder()
c._add_file('this/is/a/path/hello', 'This is some content')
(fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
os.close(fd)
c._make_iso9660(imagefile)
c.cleanup()
# Check cleanup
self.assertFalse(os.path.exists(c.tempdir))
finally:
if imagefile:
utils.delete_if_exists(imagefile)
def test_create_configdrive_vfat(self):
imagefile = None
try:
self.mox.StubOutWithMock(virtutils, 'mkfs')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(utils, 'trycmd')
virtutils.mkfs('vfat', mox.IgnoreArg(),
label='config-2').AndReturn(None)
utils.trycmd('mount', '-o', 'loop', mox.IgnoreArg(),
mox.IgnoreArg(),
run_as_root=True).AndReturn((None, None))
utils.trycmd('chown', mox.IgnoreArg(), mox.IgnoreArg(),
run_as_root=True).AndReturn((None, None))
utils.execute('umount', mox.IgnoreArg(),
run_as_root=True).AndReturn(None)
self.mox.ReplayAll()
c = configdrive.ConfigDriveBuilder()
c._add_file('this/is/a/path/hello', 'This is some content')
(fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
os.close(fd)
c._make_vfat(imagefile)
c.cleanup()
# Check cleanup
self.assertFalse(os.path.exists(c.tempdir))
# NOTE(mikal): we can't check for a VFAT output here because the
# filesystem creation stuff has been mocked out because it
# requires root permissions
finally:
if imagefile:
utils.delete_if_exists(imagefile)
| apache-2.0 | 231,965,157,446,457,250 | 32.941176 | 78 | 0.586944 | false |
colognecoin/colognecoin | contrib/bitrpc/bitrpc.py | 239 | 7836 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
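# Illustrative invocation (not part of the original script):
#   python bitrpc.py getbalance
# The lower-cased first argument selects one of the RPC wrappers below.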
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| mit | -956,826,790,930,642,200 | 23.185185 | 79 | 0.66169 | false |
tortib/nzbToMedia | libs/requests/models.py | 16 | 26200 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import DecodeError
from .exceptions import (
HTTPError, RequestException, MissingSchema, InvalidURL,
ChunkedEncodingError, ContentDecodingError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring, IncompleteRead)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_moved, # 307
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
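    # Illustrative example (not in the upstream source): for a prepared URL of
    # 'http://httpbin.org/get?key=val' this property returns '/get?key=val',
    # and a bare host such as 'http://httpbin.org' yields just '/'.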
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
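    # Illustrative example (not in the upstream source):
    # _encode_params({'k': ['v1', 'v2']}) returns 'k=v1&k=v2', while plain
    # strings and file-like objects are passed through unchanged.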
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, str):
fp = StringIO(fp)
if isinstance(fp, bytes):
fp = BytesIO(fp)
rf = RequestField(name=k, data=fp.read(),
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy()
p._cookies = self._cookies.copy()
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
try:
url = unicode(url)
except NameError:
# We're on Python 3.
url = str(url)
except UnicodeDecodeError:
pass
# Don't do any URL preparation for oddball schemes
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if (content_type) and (not 'content-type' in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except RequestException:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
"""
if self._content_consumed:
# simulate reading small chunks of the content
return iter_slices(self._content, chunk_size)
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
gen = generate()
if decode_unicode:
gen = stream_decode_response_unicode(gen, self)
return gen
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return json.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return json.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
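    # Illustrative example (not in the upstream source): a response header of
    # 'Link: <https://api.example.com/?page=2>; rel="next"' shows up here as
    # {'next': {'url': 'https://api.example.com/?page=2', 'rel': 'next'}}.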
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Closes the underlying file descriptor and releases the connection
back to the pool.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
| gpl-3.0 | -4,308,682,372,408,899,000 | 31.790989 | 114 | 0.564046 | false |
j-carl/ansible | hacking/build_library/build_ansible/command_plugins/release_announcement.py | 55 | 2905 | # coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from collections import UserString
from distutils.version import LooseVersion
# Pylint doesn't understand Python3 namespace modules.
from ..commands import Command # pylint: disable=relative-beyond-top-level
from .. import errors # pylint: disable=relative-beyond-top-level
class VersionStr(UserString):
def __init__(self, string):
super().__init__(string.strip())
self.ver_obj = LooseVersion(string)
def transform_args(args):
# Make it possible to sort versions in the jinja2 templates
new_versions = []
for version in args.versions:
new_versions.append(VersionStr(version))
args.versions = new_versions
return args
def write_message(filename, message):
if filename != '-':
with open(filename, 'w') as out_file:
out_file.write(message)
else:
sys.stdout.write('\n\n')
sys.stdout.write(message)
class ReleaseAnnouncementCommand(Command):
name = 'release-announcement'
@classmethod
def init_parser(cls, add_parser):
parser = add_parser(cls.name,
description="Generate email and twitter announcements from template")
parser.add_argument("--version", dest="versions", type=str, required=True, action='append',
help="Versions of Ansible to announce")
parser.add_argument("--name", type=str, required=True, help="Real name to use on emails")
parser.add_argument("--email-out", type=str, default="-",
help="Filename to place the email announcement into")
parser.add_argument("--twitter-out", type=str, default="-",
help="Filename to place the twitter announcement into")
@classmethod
def main(cls, args):
if sys.version_info < (3, 6):
raise errors.DependencyError('The {0} subcommand needs Python-3.6+'
' to run'.format(cls.name))
# Import here because these functions are invalid on Python-3.5 and the command plugins and
# init_parser() method need to be compatible with Python-3.4+ for now.
# Pylint doesn't understand Python3 namespace modules.
from .. announce import create_short_message, create_long_message # pylint: disable=relative-beyond-top-level
args = transform_args(args)
twitter_message = create_short_message(args.versions)
email_message = create_long_message(args.versions, args.name)
write_message(args.twitter_out, twitter_message)
write_message(args.email_out, email_message)
return 0
| gpl-3.0 | -639,490,687,652,711,700 | 36.24359 | 118 | 0.651635 | false |
eugena/django | django/contrib/sessions/backends/signed_cookies.py | 383 | 2895 | from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase
from django.core import signing
class SessionStore(SessionBase):
def load(self):
"""
We load the data from the key itself instead of fetching from
some external data store. Opposite of _get_session_key(),
raises BadSignature if signature fails.
"""
try:
return signing.loads(self.session_key,
serializer=self.serializer,
# This doesn't handle non-default expiry dates, see #19201
max_age=settings.SESSION_COOKIE_AGE,
salt='django.contrib.sessions.backends.signed_cookies')
except Exception:
# BadSignature, ValueError, or unpickling exceptions. If any of
# these happen, reset the session.
self.create()
return {}
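    # Illustrative note (not in the upstream source): the browser cookie
    # itself carries the signed session payload, roughly
    # '<base64 payload>:<timestamp>:<signature>', so load() never touches a
    # database or cache backend.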
def create(self):
"""
To create a new key, we simply make sure that the modified flag is set
so that the cookie is set on the client for the current request.
"""
self.modified = True
def save(self, must_create=False):
"""
To save, we get the session key as a securely signed string and then
set the modified flag so that the cookie is set on the client for the
current request.
"""
self._session_key = self._get_session_key()
self.modified = True
def exists(self, session_key=None):
"""
This method makes sense when you're talking to a shared resource, but
it doesn't matter when you're storing the information in the client's
cookie.
"""
return False
def delete(self, session_key=None):
"""
To delete, we clear the session key and the underlying data structure
and set the modified flag so that the cookie is set on the client for
the current request.
"""
self._session_key = ''
self._session_cache = {}
self.modified = True
def cycle_key(self):
"""
Keeps the same data but with a new key. To do this, we just have to
call ``save()`` and it will automatically save a cookie with a new key
at the end of the request.
"""
self.save()
def _get_session_key(self):
"""
Most session backends don't need to override this method, but we do,
because instead of generating a random string, we want to actually
generate a secure url-safe Base64-encoded string of data as our
session key.
"""
session_cache = getattr(self, '_session_cache', {})
return signing.dumps(session_cache, compress=True,
salt='django.contrib.sessions.backends.signed_cookies',
serializer=self.serializer)
@classmethod
def clear_expired(cls):
pass
| bsd-3-clause | -5,067,759,374,344,561,000 | 34.304878 | 78 | 0.611054 | false |
fubecka/f5-dashboard | flask/lib/python2.6/site-packages/dns/rdtypes/txtbase.py | 100 | 2994 | # Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""TXT-like base class."""
import dns.exception
import dns.rdata
import dns.tokenizer
class TXTBase(dns.rdata.Rdata):
"""Base class for rdata that is like a TXT record
@ivar strings: the text strings
@type strings: list of string
@see: RFC 1035"""
__slots__ = ['strings']
def __init__(self, rdclass, rdtype, strings):
super(TXTBase, self).__init__(rdclass, rdtype)
if isinstance(strings, str):
strings = [ strings ]
self.strings = strings[:]
def to_text(self, origin=None, relativize=True, **kw):
txt = ''
prefix = ''
for s in self.strings:
txt += '%s"%s"' % (prefix, dns.rdata._escapify(s))
prefix = ' '
return txt
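    # Illustrative example (not in the upstream source): an rdata holding the
    # strings ['hello', 'world'] renders as '"hello" "world"', with special
    # characters escaped by dns.rdata._escapify.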
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
strings = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
if not (token.is_quoted_string() or token.is_identifier()):
raise dns.exception.SyntaxError("expected a string")
if len(token.value) > 255:
raise dns.exception.SyntaxError("string too long")
strings.append(token.value)
if len(strings) == 0:
raise dns.exception.UnexpectedEnd
return cls(rdclass, rdtype, strings)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
for s in self.strings:
l = len(s)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(s)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
strings = []
while rdlen > 0:
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
s = wire[current : current + l].unwrap()
current += l
rdlen -= l
strings.append(s)
return cls(rdclass, rdtype, strings)
from_wire = classmethod(from_wire)
def _cmp(self, other):
return cmp(self.strings, other.strings)
| apache-2.0 | 4,395,238,382,831,395,000 | 33.413793 | 79 | 0.604208 | false |
guschmue/tensorflow | tensorflow/python/kernel_tests/concatenate_dataset_op_test.py | 42 | 5552 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class ConcatenateDatasetTest(test.TestCase):
def testConcatenateDataset(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 15),
np.array([37.0, 38.0, 39.0, 40.0]))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
np.array([37.0, 38.0, 39.0, 40.0, 41.0]))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
concatenated = input_dataset.concatenate(dataset_to_concatenate)
self.assertEqual(concatenated.output_shapes, (tensor_shape.TensorShape(
[20]), tensor_shape.TensorShape([15]), tensor_shape.TensorShape([])))
iterator = concatenated.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(9):
result = sess.run(get_next)
if i < 4:
for component, result_component in zip(input_components, result):
self.assertAllEqual(component[i], result_component)
else:
for component, result_component in zip(to_concatenate_components,
result):
self.assertAllEqual(component[i - 4], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testConcatenateDatasetDifferentShape(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
concatenated = input_dataset.concatenate(dataset_to_concatenate)
self.assertEqual(
[ts.as_list()
for ts in nest.flatten(concatenated.output_shapes)], [[20], [None]])
iterator = concatenated.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(9):
result = sess.run(get_next)
if i < 4:
for component, result_component in zip(input_components, result):
self.assertAllEqual(component[i], result_component)
else:
for component, result_component in zip(to_concatenate_components,
result):
self.assertAllEqual(component[i - 4], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testConcatenateDatasetDifferentStructure(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
np.array([37.0, 38.0, 39.0, 40.0, 41.0]))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegexp(ValueError,
"don't have the same number of elements"):
input_dataset.concatenate(dataset_to_concatenate)
def testConcatenateDatasetDifferentType(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1.0], [2.0], [3.0], [4.0]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 15))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegexp(TypeError, "have different types"):
input_dataset.concatenate(dataset_to_concatenate)
if __name__ == "__main__":
test.main()
| apache-2.0 | -780,461,672,860,085,100 | 40.432836 | 80 | 0.626261 | false |
eneldoserrata/marcos_openerp | addons/product_container/product_container.py | 1 | 1581 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_ul(osv.osv):
_inherit = "product.ul"
_columns = {
'container_id' : fields.many2one('product.product', 'Container Product', domain=[('container_ok','=',True)]),
}
product_ul()
class product_product(osv.Model):
_inherit = 'product.product'
_columns = {
'container_ok': fields.boolean('Container', help='Select this if the product will act as a container to carry other products.'),
'container_id': fields.many2one('product.product', 'Packed In', domain=[('container_ok','=',True)])
}
product_product() | agpl-3.0 | 8,024,555,889,463,130,000 | 39.564103 | 136 | 0.617963 | false |
bioinformatics-ua/montra | emif/questionnaire/migrations/0006_auto__chg_field_choice_value__chg_field_choice_text_en__chg_field_ques.py | 2 | 12721 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Choice.value'
db.alter_column('questionnaire_choice', 'value', self.gf('django.db.models.fields.CharField')(max_length=1000))
# Changing field 'Choice.text_en'
db.alter_column('questionnaire_choice', 'text_en', self.gf('django.db.models.fields.CharField')(max_length=2000))
# Changing field 'Question.number'
db.alter_column('questionnaire_question', 'number', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'Question.help_text'
db.alter_column('questionnaire_question', 'help_text', self.gf('django.db.models.fields.CharField')(max_length=2255))
# Changing field 'QuestionSet.heading'
db.alter_column('questionnaire_questionset', 'heading', self.gf('django.db.models.fields.CharField')(max_length=255))
def backwards(self, orm):
# Changing field 'Choice.value'
db.alter_column('questionnaire_choice', 'value', self.gf('django.db.models.fields.CharField')(max_length=64))
# Changing field 'Choice.text_en'
db.alter_column('questionnaire_choice', 'text_en', self.gf('django.db.models.fields.CharField')(max_length=200))
# Changing field 'Question.number'
db.alter_column('questionnaire_question', 'number', self.gf('django.db.models.fields.CharField')(max_length=8))
# Changing field 'Question.help_text'
db.alter_column('questionnaire_question', 'help_text', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'QuestionSet.heading'
db.alter_column('questionnaire_questionset', 'heading', self.gf('django.db.models.fields.CharField')(max_length=64))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.answer': {
'Meta': {'object_name': 'Answer'},
'answer': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Question']"}),
'runid': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Subject']"})
},
'questionnaire.choice': {
'Meta': {'object_name': 'Choice'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Question']"}),
'sortid': ('django.db.models.fields.IntegerField', [], {}),
'text_en': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'questionnaire.question': {
'Meta': {'object_name': 'Question'},
'checks': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'extra_en': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'footer_en': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '2255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'questionset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionSet']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'stats': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text_en': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'questionnaire.questionnaire': {
'Meta': {'object_name': 'Questionnaire'},
'disable': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'redirect_url': ('django.db.models.fields.CharField', [], {'default': "'/static/complete.html'", 'max_length': '128'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'questionnaire.questionset': {
'Meta': {'object_name': 'QuestionSet'},
'checks': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'heading': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Questionnaire']"}),
'sortid': ('django.db.models.fields.IntegerField', [], {}),
'text_en': ('django.db.models.fields.TextField', [], {})
},
'questionnaire.runinfo': {
'Meta': {'object_name': 'RunInfo'},
'cookies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'emailcount': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'emailsent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastemailerror': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'questionset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionSet']", 'null': 'True', 'blank': 'True'}),
'random': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'runid': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'skipped': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Subject']"}),
'tags': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'questionnaire.runinfohistory': {
'Meta': {'object_name': 'RunInfoHistory'},
'completed': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Questionnaire']"}),
'runid': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'skipped': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Subject']"}),
'tags': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'questionnaire.subject': {
'Meta': {'object_name': 'Subject'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'formtype': ('django.db.models.fields.CharField', [], {'default': "'email'", 'max_length': '16'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'unset'", 'max_length': '8', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'nextrun': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'inactive'", 'max_length': '16'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['questionnaire']
| gpl-3.0 | -1,646,891,356,422,549,800 | 68.513661 | 182 | 0.574955 | false |
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/scipy/weave/examples/fibonacci.py | 100 | 3980 | # Typical run:
# C:\home\eric\wrk\scipy\weave\examples>python fibonacci.py
# Recursively computing the first 30 fibonacci numbers:
# speed in python: 4.31599998474
# speed in c: 0.0499999523163
# speed up: 86.32
# Looping to compute the first 30 fibonacci numbers:
# speed in python: 0.000520999908447
# speed in c: 5.00000715256e-005
# speed up: 10.42
# fib(30) 832040 832040 832040 832040
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
import ext_tools
def build_fibonacci():
""" Builds an extension module with fibonacci calculators.
"""
mod = ext_tools.ext_module('fibonacci_ext')
a = 1 # this is effectively a type declaration
# recursive fibonacci in C
fib_code = """
int fib1(int a)
{
if(a <= 2)
return 1;
else
return fib1(a-2) + fib1(a-1);
}
"""
ext_code = """
return_val = fib1(a);
"""
fib = ext_tools.ext_function('c_fib1',ext_code,['a'])
fib.customize.add_support_code(fib_code)
mod.add_function(fib)
# looping fibonacci in C
fib_code = """
int fib2( int a )
{
int last, next_to_last, result;
if( a <= 2 )
return 1;
last = next_to_last = 1;
for(int i = 2; i < a; i++ )
{
result = last + next_to_last;
next_to_last = last;
last = result;
}
return result;
}
"""
ext_code = """
return_val = fib2(a);
"""
fib = ext_tools.ext_function('c_fib2',ext_code,['a'])
fib.customize.add_support_code(fib_code)
mod.add_function(fib)
mod.compile()
try:
import fibonacci_ext
except ImportError:
build_fibonacci()
import fibonacci_ext
c_fib1 = fibonacci_ext.c_fib1
c_fib2 = fibonacci_ext.c_fib2
#################################################################
# This where it might normally end, but we've added some timings
# below. Recursive solutions are much slower, and C is 10-50x faster
# than equivalent in Python for this simple little routine
#
#################################################################
def py_fib1(a):
if a <= 2:
return 1
else:
return py_fib1(a-2) + py_fib1(a-1)
def py_fib2(a):
if a <= 2:
return 1
last = next_to_last = 1
for i in range(2,a):
result = last + next_to_last
next_to_last = last
last = result
return result
import time
def recurse_compare(n):
print('Recursively computing the first %d fibonacci numbers:' % n)
t1 = time.time()
for i in range(n):
py_fib1(i)
t2 = time.time()
py = t2 - t1
print(' speed in python:', t2 - t1)
# load into cache
c_fib1(i)
t1 = time.time()
for i in range(n):
c_fib1(i)
t2 = time.time()
print(' speed in c:',t2 - t1)
print(' speed up: %3.2f' % (py/(t2-t1)))
def loop_compare(m,n):
print('Looping to compute the first %d fibonacci numbers:' % n)
t1 = time.time()
for i in range(m):
for i in range(n):
py_fib2(i)
t2 = time.time()
py = (t2-t1)
print(' speed in python:', (t2 - t1)/m)
# load into cache
c_fib2(i)
t1 = time.time()
for i in range(m):
for i in range(n):
c_fib2(i)
t2 = time.time()
print(' speed in c:',(t2 - t1) / m)
print(' speed up: %3.2f' % (py/(t2-t1)))
if __name__ == "__main__":
n = 30
recurse_compare(n)
m = 1000
loop_compare(m,n)
print('fib(30)', c_fib1(30),py_fib1(30),c_fib2(30),py_fib2(30))
| bsd-3-clause | -4,900,537,793,102,703,000 | 25.711409 | 70 | 0.489698 | false |
SGenheden/lammps | tools/moltemplate/src/postprocess_input_script.py | 19 | 5958 | #!/usr/bin/env python
"""
Reorder the integer arguments to the commands in a LAMMPS input
file if these arguments violate LAMMPS order requirements.
We have to do this because the moltemplate.sh script will automatically
assign these integers in a way which may violate these restrictions
and the user has little control over this.
This script:
swaps the I and J integers in "pair_coeff I J ..." commands when I > J
Other features may be added later
"""
import sys
lines_orig = []
f = None
fname = None
num_lines_ignore = 0
# Lines from files passed as arguments are read and processed silently.
# (Why? Sometimes it's necessary to read the contents of previous input scripts
# in order to be able to understand a script command which appears later.
# I'm assuming these files will be processed by lammps in the same order. So I
# must insure that moltemplate.sh passes them to this program in that order.
# I'm too lazy to read the "include" commands in input scripts correctly.)
if len(sys.argv) > 1:
for fname in sys.argv[1:]:
f = open(fname, 'r')
in_stream = f
lines_orig += in_stream.readlines()
        # lines_orig accumulates across files, so assign (rather than add) the
        # running total of lines read from the argument files
        num_lines_ignore = len(lines_orig)
f.close()
# Lines read from the standard input are read, processed, and printed to stdout
in_stream = sys.stdin
lines_orig += in_stream.readlines()
pair_style_list=[]
swap_occured = False
warn_wildcard = False
i=0
while i < len(lines_orig):
# Read the next logical line
# Any lines ending in '&' should be merged with the next line before breaking
line_orig = ''
while i < len(lines_orig):
line_counter = 1 + i - num_lines_ignore
line_orig += lines_orig[i]
if ((len(line_orig) < 2) or (line_orig[-2:] != '&\n')):
break
i += 1
line = line_orig.replace('&\n','\n').rstrip('\n')
comment = ''
if '#' in line_orig:
ic = line.find('#')
line = line_orig[:ic]
comment = line_orig[ic:] # keep track of comments (put them back later)
tokens = line.strip().split()
if ((len(tokens) >= 2) and (tokens[0] == 'pair_style')):
pair_style_list = tokens[1:]
if ((len(tokens) >= 3) and (tokens[0] == 'pair_coeff')):
if ((tokens[1].isdigit() and (tokens[2].isdigit())) and
(int(tokens[1]) > int(tokens[2]))):
swap_occured = True
tmp = tokens[2]
tokens[2] = tokens[1]
tokens[1] = tmp
if i >= num_lines_ignore:
# polite warning:
sys.stderr.write('swapped pair_coeff order on line '+str(line_counter))
#if (fname != None):
# sys.stderr.write(' of file \"'+fname+'\"')
sys.stderr.write('\n')
# Deal with the "hbond/" pair coeffs.
#
# The hbond/dreiding pair style designates one of the two atom types
# as a donor, and the other as an acceptor (using the 'i','j' flags)
            # If we swapped atom types earlier, we also need to swap 'i' with 'j'.
#
# If "hbond/dreiding.." pair style is used with "hybrid" or
# "hybrid/overlay" then tokens[3] is the name of the pair style
# and tokens[5] is either 'i' or 'j'.
if len(pair_style_list) > 0:
if ((pair_style_list[0] == 'hybrid') or
(pair_style_list[0] == 'hybrid/overlay')):
if ((len(tokens) > 5) and (tokens[5] == 'i') and (tokens[3][0:6]=='hbond/')):
tokens[5] = 'j'
sys.stderr.write(' (and replaced \"i\" with \"j\")\n')
elif ((len(tokens) > 5) and (tokens[5] == 'j') and (tokens[3][0:6]=='hbond/')):
tokens[5] = 'i'
sys.stderr.write(' (and replaced \"j\" with \"i\")\n')
elif (pair_style_list[0][0:6] == 'hbond/'):
if ((len(tokens) > 4) and (tokens[4] == 'i')):
tokens[4] = 'j'
sys.stderr.write(' (and replaced \"i\" with \"j\")\n')
elif ((len(tokens) > 4) and (tokens[4] == 'j')):
tokens[4] = 'i'
sys.stderr.write(' (and replaced \"j\" with \"i\")\n')
sys.stdout.write((' '.join(tokens)+comment).replace('\n','&\n')+'\n')
else:
if ((('*' in tokens[1]) or ('*' in tokens[2]))
and
(not (('*' == tokens[1]) and ('*' == tokens[2])))):
warn_wildcard = True
if i >= num_lines_ignore:
sys.stdout.write(line_orig)
else:
if i >= num_lines_ignore:
sys.stdout.write(line_orig)
i += 1
if swap_occured:
sys.stderr.write('\n'
' WARNING: Atom order in some pair_coeff commands was swapped to pacify LAMMPS.\n'
' For some exotic pair_styles such as hbond/dreiding, this is not enough. If you\n'
' use exotic pair_styles, please verify the \"pair_coeff\" commands are correct.\n')
if warn_wildcard:
sys.stderr.write('\n'
' WARNING: The use of wildcard characters (\"*\") in your \"pair_coeff\"\n'
' commands is not recommended.\n'
' (It is safer to specify each interaction pair manually.\n'
' Check every pair_coeff command. Make sure that every atom type in\n'
' the first group is <= atom types in the second group.\n'
' Moltemplate does NOT do this when wildcards are used.)\n'
' If you are using a many-body pair style then ignore this warning.\n')
| gpl-2.0 | 5,572,488,333,122,137,000 | 39.530612 | 106 | 0.521652 | false |
marionleborgne/cloudbrain-websocket-server | src/cbws/server.py | 2 | 12783 | import pika
import json
import logging
from collections import defaultdict
from sockjs.tornado.conn import SockJSConnection
from sockjs.tornado import SockJSRouter
from tornado.ioloop import IOLoop
from tornado.web import Application
from uuid import uuid4
from cloudbrain.core.auth import CloudbrainAuth
_LOGGER = logging.getLogger()
_LOGGER.setLevel(logging.INFO)
recursivedict = lambda: defaultdict(recursivedict)
def _rt_stream_connection_factory(rabbitmq_address, rabbit_auth_url):
"""
RtStreamConnection class factory.
:param rabbitmq_address: RabbitMQ server address.
:param rabbit_auth_url: RabbitMQ authentication server address.
:return: RtStreamConnection
"""
class RtStreamConnection(SockJSConnection):
"""RtStreamConnection connection implementation"""
clients = set()
def __init__(self, session):
super(self.__class__, self).__init__(session)
self.subscribers = recursivedict()
self.total_records = recursivedict()
def send_probe_factory(self, exchange_name, routing_key):
def send_probe(body):
logging.debug("GOT: " + body)
buffer_content = json.loads(body)
# FIXME: Keep old buffer parsing for backward compatibility.
if type(buffer_content) == list:
for record in buffer_content:
self.subscribers[exchange_name][routing_key] \
["total_records"] += 1
record["exchangeName"] = exchange_name
record["routingKey"] = routing_key
self.send(json.dumps(record))
# FIXME: This is the new data format. Keep this parsing.
elif type(buffer_content) == dict:
for record in buffer_content['chunk']:
self.subscribers[exchange_name][routing_key] \
["total_records"] += 1
record["exchangeName"] = exchange_name
record["routingKey"] = routing_key
self.send(json.dumps(record))
return send_probe
def on_open(self, info):
logging.info("Got a new connection...")
self.clients.add(self)
def on_message(self, message):
"""
This will receive instructions from the client to change the
stream. After the connection is established we expect to receive a
JSON with exchangeName, routingKey, token; then we subscribe to
RabbitMQ and start streaming the data.
            NOTE: it's not possible to open multiple connections from the same
            client, so in case we need to stream different devices/metrics/etc.
            at the same time, we need a solution like the multiplexing example
            in the sockjs-tornado examples folder.
:param message: subscription message to process
"""
logging.info("Got a new subscription message: " + message)
msg_dict = json.loads(message)
if msg_dict['type'] == 'subscription':
self.handle_channel_subscription(msg_dict)
elif msg_dict['type'] == 'unsubscription':
self.handle_channel_unsubscription(msg_dict)
def handle_channel_subscription(self, stream_configuration):
exchange_name = stream_configuration['exchangeName']
routing_key = stream_configuration['routingKey']
queue_name = 'websocket-client-%s' % str(uuid4())
# Look for a token.
token = (stream_configuration['token']
if 'token' in stream_configuration else None)
# If there is a token, use it in place of rabbitmq user and pwd.
if token:
rabbitmq_user = token
rabbitmq_pwd = ""
# Otherwise, look for a username and password.
elif ('user' in stream_configuration
and 'password' in stream_configuration):
rabbitmq_user = stream_configuration['user']
rabbitmq_pwd = stream_configuration['password']
else:
                raise ValueError('Missing parameters in subscribe message '
                                 'to websocket server. You must either pass '
                                 'the param "token" OR two params: '
                                 '"user" and "password".')
if not self.routing_key_exists(exchange_name, routing_key):
self.subscribers[exchange_name][routing_key] = {
"subscriber": TornadoSubscriber(
callback=self.send_probe_factory(
exchange_name, routing_key),
exchange_name=exchange_name,
routing_key=routing_key,
rabbit_auth_url=rabbit_auth_url,
rabbitmq_address=rabbitmq_address,
rabbitmq_user=rabbitmq_user,
rabbitmq_pwd=rabbitmq_pwd,
queue_name=queue_name,
token=token
),
"total_records": 0
}
self.subscribers[exchange_name] \
[routing_key]["subscriber"].connect()
def handle_channel_unsubscription(self, unsubscription_msg):
exchange_name = unsubscription_msg['exchangeName']
routing_key = unsubscription_msg['routingKey']
logging.info("Unsubscription received for "
"exchange_name: %s, routing_key: %s"
% (exchange_name, routing_key))
if self.routing_key_exists(exchange_name, routing_key):
self.subscribers[exchange_name][routing_key] \
["subscriber"].disconnect()
def on_close(self):
logging.info("Disconnecting client...")
for exchange_name in self.subscribers:
for routing_key in self.subscribers[exchange_name]:
subscriber = self.subscribers[exchange_name] \
[routing_key]["subscriber"]
if subscriber is not None:
logging.info(
"Disconnecting subscriber for exchange_name: %s, "
"routing_key: %s" % (exchange_name, routing_key))
subscriber.disconnect()
self.subscribers = {}
self.clients.remove(self)
logging.info("Client disconnection complete!")
def send_heartbeat(self):
self.broadcast(self.clients, 'message')
def routing_key_exists(self, exchange_name, routing_key):
            return (exchange_name in self.subscribers
                    and routing_key in self.subscribers[exchange_name])
return RtStreamConnection
class TornadoSubscriber(object):
"""
See: https://pika.readthedocs.org/en/0.9.14/examples/tornado_consumer.html
"""
def __init__(self, callback, exchange_name, routing_key, rabbit_auth_url,
rabbitmq_address, rabbitmq_user, rabbitmq_pwd,
queue_name, token=None):
self.callback = callback
self.exchange_name = exchange_name
self.routing_key = routing_key
self.rabbit_auth_url = rabbit_auth_url
self.rabbitmq_address = rabbitmq_address
self.rabbitmq_user = rabbitmq_user
self.rabbitmq_pwd = rabbitmq_pwd
self.queue_name = queue_name
self.token = token
self.connection = None
self.channel = None
self.consumer_tag = None
def connect(self):
auth = CloudbrainAuth(self.rabbit_auth_url)
if self.token:
credentials = pika.PlainCredentials(self.token, '')
vhost = auth.get_vhost_by_token(self.token)
connection_params = pika.ConnectionParameters(
host=self.rabbitmq_address, virtual_host=vhost,
credentials=credentials)
else:
credentials = pika.PlainCredentials(self.rabbitmq_user,
self.rabbitmq_pwd)
vhost = getattr(self, 'rabbitmq_vhost',
auth.get_vhost_by_username(self.rabbitmq_user))
connection_params = pika.ConnectionParameters(
host=self.rabbitmq_address, virtual_host=vhost,
credentials=credentials)
self.connection = pika.adapters.tornado_connection.TornadoConnection(
connection_params,
self.on_connected,
stop_ioloop_on_close=False,
custom_ioloop=IOLoop.instance())
def disconnect(self):
if self.connection is not None:
self.connection.close()
def on_connected(self, connection):
self.connection = connection
self.connection.add_on_close_callback(self.on_connection_closed)
self.connection.add_backpressure_callback(self.on_backpressure_callback)
self.open_channel()
def on_connection_closed(self, connection, reply_code, reply_text):
self.connection = None
self.channel = None
def on_backpressure_callback(self, connection):
logging.info("******** Backpressure detected for exchange %s and "
"routing key %s" % (self.exchange_name, self.routing_key))
def open_channel(self):
self.connection.channel(self.on_channel_open)
def on_channel_open(self, channel):
self.channel = channel
self.channel.add_on_close_callback(self.on_channel_closed)
logging.info("Declaring exchange: %s" % self.exchange_name)
if self.exchange_name == 'amq.topic':
            # Note: this is the reserved exchange name for MQTT. Therefore,
# "type" must be "topic" and "durable" must be set to "True".
self.channel.exchange_declare(self.on_exchange_declareok,
exchange=self.exchange_name,
type='topic',
durable=True)
else:
self.channel.exchange_declare(self.on_exchange_declareok,
exchange=self.exchange_name,
type='direct',
passive=True)
def on_channel_closed(self, channel, reply_code, reply_text):
self.connection.close()
def on_exchange_declareok(self, unused_frame):
self.channel.queue_declare(self.on_queue_declareok,
self.queue_name,
exclusive=True)
def on_queue_declareok(self, unused_frame):
logging.info("Binding queue. Exchange name: %s. Routing key: %s"
% (self.exchange_name, self.routing_key))
self.channel.queue_bind(
self.on_bindok,
exchange=self.exchange_name,
queue=self.queue_name,
routing_key=self.routing_key)
def on_bindok(self, unused_frame):
self.channel.add_on_cancel_callback(self.on_consumer_cancelled)
self.consumer_tag = self.channel.basic_consume(self.on_message,
self.queue_name,
exclusive=True,
no_ack=True)
def on_consumer_cancelled(self, method_frame):
if self.channel:
self.channel.close()
def on_message(self, unused_channel, basic_deliver, properties, body):
self.callback(body)
class WebsocketServer(object):
def __init__(self, ws_server_port, rabbitmq_address, rabbit_auth_url):
self.rabbitmq_address = rabbitmq_address
self.ws_server_port = ws_server_port
self.rabbit_auth_url = rabbit_auth_url
def start(self):
RtStreamConnection = _rt_stream_connection_factory(
self.rabbitmq_address, self.rabbit_auth_url)
# 1. Create chat router
RtStreamRouter = SockJSRouter(RtStreamConnection, '/rt-stream')
# 2. Create Tornado application
app = Application(RtStreamRouter.urls)
# 3. Make Tornado app listen on Pi
app.listen(self.ws_server_port)
print("Real-time data server running at "
"http://localhost:%s" % self.ws_server_port)
# 4. Start IOLoop
IOLoop.instance().start()
def stop(self):
IOLoop.instance().stop()
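# Example wiring (a sketch only -- the port and both addresses below are
# placeholders, not values shipped with this module):
#
#   if __name__ == '__main__':
#       server = WebsocketServer(ws_server_port=9999,
#                                rabbitmq_address='localhost',
#                                rabbit_auth_url='http://localhost:8080')
#       server.start()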
| agpl-3.0 | 2,093,842,810,976,430,600 | 38.946875 | 80 | 0.565751 | false |
andrewyoung1991/scons | test/option/h.py | 5 | 2083 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
test = TestSCons.TestSCons()
test.run(arguments = '-h')
test.must_contain_all_lines(test.stdout(), ['-h, --help'])
test.run(arguments = '-u -h')
test.must_contain_all_lines(test.stdout(), ['-h, --help'])
test.run(arguments = '-U -h')
test.must_contain_all_lines(test.stdout(), ['-h, --help'])
test.run(arguments = '-D -h')
test.must_contain_all_lines(test.stdout(), ['-h, --help'])
test.write('SConstruct', "")
test.run(arguments = '-h')
test.must_contain_all_lines(test.stdout(), ['-h, --help'])
test.run(arguments = '-u -h')
test.must_contain_all_lines(test.stdout(), ['-h, --help'])
test.run(arguments = '-U -h')
test.must_contain_all_lines(test.stdout(), ['-h, --help'])
test.run(arguments = '-D -h')
test.must_contain_all_lines(test.stdout(), ['-h, --help'])
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 1,557,185,321,229,121,500 | 32.063492 | 73 | 0.707633 | false |
larsbutler/coveragepy | tests/helpers.py | 2 | 1599 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Helpers for coverage.py tests."""
import subprocess
def run_command(cmd):
"""Run a command in a sub-process.
Returns the exit status code and the combined stdout and stderr.
"""
proc = subprocess.Popen(
cmd, shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
output, _ = proc.communicate()
status = proc.returncode
# Get the output, and canonicalize it to strings with newlines.
if not isinstance(output, str):
output = output.decode('utf-8')
output = output.replace('\r', '')
return status, output
class CheckUniqueFilenames(object):
"""Asserts the uniqueness of filenames passed to a function."""
def __init__(self, wrapped):
self.filenames = set()
self.wrapped = wrapped
@classmethod
def hook(cls, cov, method_name):
"""Replace a method with our checking wrapper."""
method = getattr(cov, method_name)
hook = cls(method)
setattr(cov, method_name, hook.wrapper)
return hook
def wrapper(self, filename, *args, **kwargs):
"""The replacement method. Check that we don't have dupes."""
assert filename not in self.filenames, (
"Filename %r passed to %r twice" % (filename, self.wrapped)
)
self.filenames.add(filename)
ret = self.wrapped(filename, *args, **kwargs)
return ret
| apache-2.0 | -779,119,421,239,343,000 | 29.75 | 79 | 0.636648 | false |
wskplho/sl4a | python/python-twitter/simplejson/decoder.py | 135 | 12032 | """Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
try:
from simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _speedups
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
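    # Illustrative call: py_scanstring('"abc"', 1) returns (u'abc', 5) -- the
    # decoded text plus the index one past the closing quote.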
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at" % (terminator,)
                raise ValueError(errmsg(msg, s, end))
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
# Unicode escape sequence
esc = s[end + 1:end + 5]
next_end = end + 5
if len(esc) != 4:
msg = "Invalid \\uXXXX escape"
raise ValueError(errmsg(msg, s, end))
uni = int(esc, 16)
# Check for surrogate pair on UCS-4 systems
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError(errmsg(msg, s, end))
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError(errmsg(msg, s, end))
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
char = unichr(uni)
end = next_end
# Append the unescaped character
_append(char)
return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
pairs = {}
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
return pairs, end + 1
elif nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True):
"""``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
"""
self.encoding = encoding
self.object_hook = object_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
| apache-2.0 | -7,300,721,329,885,309,000 | 33.574713 | 108 | 0.523188 | false |
BhallaLab/moose-examples | traub_2005/py/fig_a3.py | 1 | 3700 | # fig_a3.py ---
#
# Filename: fig_a3.py
# Description:
# Author:
# Maintainer:
# Created: Wed Jun 26 17:07:59 2013 (+0530)
# Version:
# Last-Updated: Sun Jun 25 15:09:46 2017 (-0400)
# By: subha
# Update #: 162
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""This script is intended for reproducing figure A3 of the Traub et
al 2005 paper. This is a test for the spiny stellate cell."""
import numpy as np
import pylab
import moose
from moose import utils
from cells import SpinyStellate
import config
simtime = 500e-3
simdt = 2e-5
plotdt = 1e-4
def setup_model(root='/', hsolve=True):
moose.setCwe(root)
model = moose.Neutral('model')
data = moose.Neutral('data')
cell = SpinyStellate('%s/spinystellate' % (model.path))
p = '%s/comp_1'%cell.path
soma = moose.element(p) if moose.exists(p) else moose.Compartment(p)
if hsolve:
solver = moose.HSolve('%s/solve' % (cell.path))
solver.dt = simdt
solver.target = model.path
pulse = moose.PulseGen('%s/stimulus' % (model.path))
moose.connect(pulse, 'output', soma, 'injectMsg')
tab_vm = moose.Table('%s/spinystellate_soma_Vm' % (data.path))
moose.connect(tab_vm, 'requestOut', soma, 'getVm')
tab_stim = moose.Table('%s/spinystellate_soma_inject' % (data.path))
moose.connect(tab_stim, 'requestOut', pulse, 'getOutputValue')
utils.setDefaultDt(elecdt=simdt, plotdt2=plotdt)
utils.assignDefaultTicks(model, data)
return {'stimulus': pulse,
'tab_vm': tab_vm,
'tab_stim': tab_stim}
def do_sim(pulsegen, amp):
pulsegen.level[0] = amp
pulsegen.delay[0] = 50e-3
pulsegen.width[0] = 400e-3
moose.reinit()
utils.stepRun(simtime, 10000*simdt, logger=config.logger)
def main():
amps = [0.167e-9, 0.25e-9, 0.333e-9]
model_dict = setup_model()
for ii, a in enumerate(amps):
do_sim(model_dict['stimulus'], a)
config.logger.info('##### %d' % (model_dict['tab_vm'].size))
vm = model_dict['tab_vm'].vector
inject = model_dict['tab_stim'].vector.copy()
t = np.linspace(0, simtime, len(vm))
fname = 'data_fig_a3_%s.txt' % (chr(ord('A')+ii))
np.savetxt(fname,
np.vstack((t, inject, vm)).transpose())
msg = 'Saved data for %g A current pulse in %s' % (a, fname)
config.logger.info(msg)
print(msg)
pylab.subplot(3,1,ii+1)
pylab.title('%g nA' % (a*1e9))
pylab.plot(t, vm, label='soma-Vm (mV)')
stim_boundary = np.flatnonzero(np.diff(inject))
pylab.plot((t[stim_boundary[0]]), (vm.min()), 'r^', label='stimulus start')
pylab.plot((t[stim_boundary[-1]]), (vm.min()), 'gv', label='stimulus end')
pylab.legend()
pylab.savefig('fig_a3.png')
pylab.show()
if __name__ == '__main__':
main()
#
# fig_a3.py ends here
| gpl-2.0 | 8,776,512,083,494,052,000 | 28.83871 | 110 | 0.622703 | false |
alsrgv/tensorflow | tensorflow/contrib/periodic_resample/__init__.py | 56 | 1176 | # =============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Custom op used by periodic_resample."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.periodic_resample.python.ops.periodic_resample_op import periodic_resample
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["periodic_resample"]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 | 3,581,821,861,418,024,400 | 42.555556 | 98 | 0.668367 | false |
vanda/DigitalLabels | labels/management/commands/build.py | 1 | 2432 | import codecs
import os
from distutils.dir_util import copy_tree
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.client import Client
from django.template.defaultfilters import slugify
from labels.models import DigitalLabel, Portal
class Command(BaseCommand):
args = "<digitallabel_id digitallabel_id>"
help = "Creates a static bundle of HTML, media and images for the labels"
option_list = BaseCommand.option_list + (
make_option('-o',
'--out',
default=".",
help="Where to put them?"
),
)
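    # Invocation sketch (the output path is only an example):
    #
    #   python manage.py build --out=/tmp/labels_export
    #
    # This renders every DigitalLabel and Portal to static HTML and copies
    # the static/media trees alongside them.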
def handle(self, *args, **options):
destination = options['out']
for dl in DigitalLabel.objects.all():
self.save_html(dl, destination)
for pt in Portal.objects.all():
self.save_html(pt, destination)
# handle static media: JS, IMG, CSS, etc.
# SOURCE DIRS
media_abspath = os.path.abspath(settings.MEDIA_ROOT)
static_abspath = os.path.abspath(settings.STATIC_ROOT)
# DESTINATION DIRS
static_build_dir = os.path.join(destination,
os.path.basename(static_abspath))
media_build_dir = os.path.join(destination,
os.path.basename(media_abspath))
# COPY FILES
copy_tree(settings.STATIC_ROOT, static_build_dir)
copy_tree(os.path.join(settings.MEDIA_CACHE_ROOT), media_build_dir)
def save_html(self, screen, destination):
cl = Client()
page_html = cl.get('/%s/%d/' % (screen.model_name, screen.pk)).content
# make img, css and js links relative
page_html = page_html.replace('data-img-l="/media/cache/', 'data-img-l="./media/'
).replace('src="/', 'src="./'
).replace('src="./media/cache/', 'src="./media/'
).replace('href="/', 'href="./')
dest_abspath = os.path.abspath(destination)
if not os.path.exists(dest_abspath):
print 'Making %s' % (dest_abspath)
os.mkdir(dest_abspath)
filename = os.path.join(destination,
'%s.html' % (slugify(screen.name)))
f = codecs.open(filename, 'w', 'UTF-8')
unicode_html = unicode(page_html, 'UTF-8')
f.write(unicode_html)
| bsd-3-clause | -2,034,072,627,371,936,000 | 34.246377 | 89 | 0.580592 | false |
kpingul/shoppingWidget | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/generator/ninja.py | 372 | 89149 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here.
self.component_objs = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
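# A concrete illustration of GypPathToNinja (paths are hypothetical): for a
# .gyp file in 'foo/' building into 'out/Debug', the gyp-relative source
# 'baz/qux.cc' is rewritten to '../../foo/baz/qux.cc', i.e. a path relative to
# the build directory.  GypPathToUniqueOutput carries its own worked example
# in its body below.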
class NinjaWriter:
def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.qualified_target = qualified_target
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
    # otherwise we depend on dependent targets' actions/rules/copies etc.
    # We never need to explicitly depend on previous targets' link steps,
    # because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
self.target.actions_stamp or actions_depends)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRules(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
self.WriteMacInfoPlist(mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
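    # Each action becomes a uniquely named ninja rule (the name is suffixed
    # with an md5 of the qualified target to avoid collisions in the global
    # rule namespace) plus one build edge from its inputs to its outputs.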
# Actions cd into the base directory.
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv(
'$!PRODUCT_DIR', config=self.config_name)
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'],
hashlib.md5(self.qualified_target).hexdigest())
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env=env)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetSortedXcodeEnv()
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'],
hashlib.md5(self.qualified_target).hexdigest())
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env=env)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
def cygwin_munge(path):
if is_cygwin:
return path.replace('\\', '/')
return path
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
inputs = [self.ExpandRuleVariables(i, root, dirname,
source, ext, basename)
for i in rule.get('inputs', [])]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
inputs = [self.GypPathToNinja(i, env) for i in inputs]
outputs = [self.GypPathToNinja(o, env) for o in outputs]
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetSortedXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
def WriteMacInfoPlist(self, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See comment at cc_command for why there's two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=config_name)
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
ninja_file.newline()
outputs = []
has_rc_source = False
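    # Dispatch each source file to a compile rule based on its extension;
    # unhandled extensions (e.g. headers) are skipped, and every compiled
    # source becomes one build edge whose object file feeds the link step.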
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
self.msvs_settings.GetArch(config_name) == 'x86' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
# Asm files only get auto assembled for x86 (not x64).
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
self.ninja.build(output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
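        # Explanatory note: $ORIGIN makes the dynamic loader search for shared
        # libraries relative to the executable's location at runtime; the
        # doubled '$' is a ninja escape and the backslash keeps the shell from
        # expanding it.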
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append('-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
gyp.common.uniquer(map(self.ExpandSpecial, ldflags)))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if '/NOENTRY' not in ldflags:
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
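    # 'none' targets (and targets with nothing to link) reuse the compile
    # stamp as their 'binary'; static libraries are archived here, using thin
    # archives where the platform allows and standalone_static_library is not
    # set; everything else defers to WriteLink.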
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be non-zero if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
    'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
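    # Illustrative: [('FOO', 'foo'), ('BAR', 'a b')] yields roughly
    # "export FOO=foo; export BAR='a b';" (exact quoting comes from
    # gyp.common.EncodePOSIXShellArgument).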
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, restat=True,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.exe'
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.lib'
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.dll'
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
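  # e.g. CommandWithWrapper('CC', {'CC': 'ccache'}, 'gcc') returns
  # 'ccache gcc'; with no wrapper registered the program is returned unchanged.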
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
mem_limit = max(1, stat.ullTotalPhys / (4 * (2 ** 30))) # total / 4GB
hard_cap = max(1, int(os.getenv('GYP_LINK_CONCURRENCY_MAX', 2**32)))
# return min(mem_limit, hard_cap)
# TODO(scottmg): Temporary speculative fix for OOM on builders
# See http://crbug.com/333000.
return 2
elif sys.platform.startswith('linux'):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
# Allow 8Gb per link on Linux because Gold is quite memory hungry
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
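  # e.g. _AddWinLinkRules registers 'link'/'solink'/'solink_module' plus
  # '_embed' variants; WriteLinkForArch selects between them via this suffix.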
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
  # The rules are:
  # - Priority, from lowest to highest: the gcc/g++ defaults, the
  #   'make_global_settings' section in the gyp file, then environment
  #   variables.
  # - If there is no 'make_global_settings' entry for CC.host/CXX.host and no
  #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host fall back
  #   to cc/cxx.
if flavor == 'win':
# Overridden by local arch choice in the use_deps case.
# Chromium's ffmpeg c99conv.py currently looks for a 'cc =' line in
# build.ninja so needs something valid here. http://crbug.com/233985
cc = 'cl.exe'
cxx = 'cl.exe'
ld = 'link.exe'
ld_host = '$ld'
else:
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'CC':
cc = os.path.join(build_to_root, value)
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, OpenOutput)
for arch, path in cl_paths.iteritems():
master_ninja.variable(
'cl_' + arch, CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, flavor)))
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', 'lib.exe')
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('asm', 'ml.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but rarely errors out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $in',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
  # The resulting string leaves an uninterpolated %(suffix)s placeholder which
  # is filled in by the final substitutions below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ]; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then mv ${lib}.tmp ${lib}.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ readelf -d ${lib} | grep SONAME ; '
'nm -gD -f p ${lib} | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in $solibs -Wl,--end-group $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; '
'else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then '
'mv ${lib}.tmp ${lib}.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '$in $solibs $libs$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
pool='link_pool')
solink_module_suffix = '$in $solibs $libs$postbuilds'
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_module_suffix,
'type': '-bundle'},
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_module_suffix, 'type': '-bundle'},
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $keys')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create files for ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| mit | -4,818,809,088,044,118,000 | 40.464651 | 80 | 0.623731 | false |
finfish/scrapy | scrapy/spidermiddlewares/offsite.py | 7 | 2563 | """
Offsite Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
import re
import logging
import warnings
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
logger = logging.getLogger(__name__)
class OffsiteMiddleware(object):
def __init__(self, stats):
self.stats = stats
@classmethod
def from_crawler(cls, crawler):
o = cls(crawler.stats)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
def process_spider_output(self, response, result, spider):
for x in result:
if isinstance(x, Request):
if x.dont_filter or self.should_follow(x, spider):
yield x
else:
domain = urlparse_cached(x).hostname
if domain and domain not in self.domains_seen:
self.domains_seen.add(domain)
logger.debug(
"Filtered offsite request to %(domain)r: %(request)s",
{'domain': domain, 'request': x}, extra={'spider': spider})
self.stats.inc_value('offsite/domains', spider=spider)
self.stats.inc_value('offsite/filtered', spider=spider)
else:
yield x
def should_follow(self, request, spider):
regex = self.host_regex
# hostname can be None for wrong urls (like javascript links)
host = urlparse_cached(request).hostname or ''
return bool(regex.search(host))
def get_host_regex(self, spider):
"""Override this method to implement a different offsite policy"""
allowed_domains = getattr(spider, 'allowed_domains', None)
if not allowed_domains:
return re.compile('') # allow all by default
url_pattern = re.compile("^https?://.*$")
for domain in allowed_domains:
if url_pattern.match(domain):
message = ("allowed_domains accepts only domains, not URLs. "
"Ignoring URL entry %s in allowed_domains." % domain)
warnings.warn(message, URLWarning)
domains = [re.escape(d) for d in allowed_domains if d is not None]
regex = r'^(.*\.)?(%s)$' % '|'.join(domains)
return re.compile(regex)
def spider_opened(self, spider):
self.host_regex = self.get_host_regex(spider)
self.domains_seen = set()
class URLWarning(Warning):
pass
| bsd-3-clause | 2,582,497,211,499,517,400 | 34.597222 | 87 | 0.587202 | false |
Theer108/invenio | invenio/utils/remote_debugger/__init__.py | 5 | 10582 | # This file is part of Invenio.
# Copyright (C) 2011, 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Module for debugging mod_python && mod_wsgi applications that run inside
the Apache webserver (or any other webserver). This is a utility module
that makes remote debugging possible and easy.
"""
import warnings
from invenio.utils.deprecation import RemovedInInvenio21Warning
warnings.warn("Remote debugger is going to be removed. "
"Please use native Werkzeug debugger.",
RemovedInInvenio21Warning)
# Debug mode is activated by passing debug=[debugger_id] in the url. When
# you try to load a webpage using such a url, the execution will stop (if
# breakpoints are set, or automatically, depending on the debugger you are
# using). This module is only a helper utility; the actual debugging is
# done by others.
#
# Each debugger has its own number:
#
# local winpdb: debug=1
# remote winpdb: debug=2
# remote pydev: debug=3
#
# If the debug parameter is not present, the code is executed normally
# (without stopping).
#
#
# Each debugger has its own parameters that can be set via url parameters.
# You can also create your own debugging functions and assign a new
# number to them. Please see the get_debugger() function for more information on
# how to create a new call, and see the individual debuggers for information
# what parameters they accept.
#
# Important: Remember to set WSGIDaemonProcess processes=1 threads=1 in Apache
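#
# Illustrative example of the url parameter format described above (an
# assumption added for clarity, not part of the original notes): a request
# such as http://<host>/some/page?debug=3|ip:192.168.0.10|port:5678 selects
# debugger number 3 (remote pydev) and overrides its ip and port options.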
# ----------------------------- CONFIGURATION -----------------------------------------
from .config import CFG_REMOTE_DEBUGGER_ENABLED, \
CFG_REMOTE_DEBUGGER_IMPORT, CFG_REMOTE_DEBUGGER_WINPDB_PASSWORD, \
CFG_REMOTE_DEBUGGER_PYDEV_REMOTE_IP, CFG_REMOTE_DEBUGGER_PYDEV_REMOTE_PORT, \
CFG_REMOTE_DEBUGGER_PYDEV_PATHS, CFG_REMOTE_DEBUGGER_WSGI_RELOAD, \
CFG_PYDEV_DEBUG, CFG_REMOTE_DEBUGGER_TYPE, CFG_REMOTE_DEBUGGER_NAME
# -------------------------------------------------------------------------------------
# --------------------------- no config past this point -------------------------------
# -------------------------------------------------------------------------------------
from invenio import config
import os
import glob
import traceback
import sys
from six import StringIO
def start_file_changes_monitor():
from invenio.utils import remote_debugger_wsgi_reload as monitor
monitor.start(interval=1.0)
for pattern in CFG_REMOTE_DEBUGGER_WSGI_RELOAD:
for f in glob.glob(os.path.join(config.CFG_PREFIX, pattern)):
monitor.track(f)
# -------------------------------------------------------------------------------------
# ----------------------------- DEBUGGER PART LOADING --------------------------
# -------------------------------------------------------------------------------------
normcase = os.path.normcase
# raise exception so that this module is not loaded (this module is always imported
# in try...except manner)
if not CFG_REMOTE_DEBUGGER_ENABLED:
raise Exception('Remote debugger is disabled')
# import modules that are configured for this debugger, at least for Eclipse, this
# MUST HAPPEN before other stuff gets loaded
for path, name in CFG_REMOTE_DEBUGGER_IMPORT.get(CFG_REMOTE_DEBUGGER_TYPE, {}).items():
try:
if '.' in path:
globals()[name] = __import__(path, globals(), locals(), path.split('.'))
else:
globals()[name] = __import__(path)
except Exception:
traceback.print_exc()
sys.stderr.write("Error in remote_debugger, import of the %s failed" % path)
def error_msg(debugger_args):
"""Error has been caught and we were given chance to report it"""
debug_no, params = parse_args(debugger_args)
if debug_no == '3':
exc_info = sys.exc_info()
if exc_info[0]:
exception_data = StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, exception_data)
exception_data = exception_data.getvalue()
if exception_data.endswith('\n'):
exception_data = exception_data[:-1]
#pydev is truncating data (no help printing in loop)
sys.stderr.write('\n\n...')
sys.stderr.write(exception_data[-600:])
sys.stderr.write('\n\n')
def start():
"""
    Switch into a debugger mode manually - to be called from command line scripts mostly.
    The debugger to start is selected by the CFG_REMOTE_DEBUGGER_NAME configuration option.
"""
debug_starter = get_debugger()
if debug_starter is None:
raise Exception("Requested debugger not found or not initalized properly.")
debug_starter()
def get_debugger():
"""
Returns function that will initialize the debugger
@var arg: arg passed from url parameter debug=xxx
@return: function call
"""
params = {}
if 'winpdb-local' == CFG_REMOTE_DEBUGGER_NAME:
func = start_embedded_winpdb_debugger
elif 'winpdb-remote' == CFG_REMOTE_DEBUGGER_NAME:
func = start_remote_winpdb_debugger
elif 'pydev-remote' == CFG_REMOTE_DEBUGGER_NAME:
func = start_remote_pydev_debugger
else:
return None
# we could determine the function signature and check arguments
# func.func_code.co_varnames[:func.func_code.co_argcount]
    # but I don't do that intentionally (to raise an error if something wrong is
    # submitted)
#raise(str(params))
return lambda: func(**params)
def parse_args(arg):
"""Parses arguments supplied through url param debug=xcxv
@return: tuple of debuggper_no, additional_params
"""
debug_no = ''
params = {}
# parse the passed-in arg
if '|' in arg[0]:
# it will raise error if something wrong happens
a = arg[0].split('|')
debug_no = a[0]
for k, v in map(lambda x: x.split(':'), a[1:]):
try:
v = int(v)
except:
if v == 'False':
v = False
elif v == 'True':
v = True
params[k] = v
else:
debug_no = arg[0]
return (debug_no, params)
def start_embedded_winpdb_debugger(passwd=None):
"""
Winpdb debugger, rpdb2 must be enabled in the
CFG_REMOTE_DEBUGGER_IMPORT
Change the call to suit your needs
"""
p = passwd or CFG_REMOTE_DEBUGGER_WINPDB_PASSWORD
rpdb2.start_embedded_debugger(p)
def start_remote_winpdb_debugger(passwd=None):
"""
Winpdb remote debugger, change the call to suit your needs
"""
p = passwd or CFG_REMOTE_DEBUGGER_WINPDB_PASSWORD
rpdb2.start_embedded_debugger(p, fAllowRemote=True)
def start_remote_pydev_debugger(ip=None, port=None, suspend=False, stderr=True, stdout=True, path=None):
"""
remote eclipse/pydev debugger, pydev and putils module should be available
in the CFG_REMOTE_DEBUGGER_IMPORT
If you want to change behaviour of the debugger interactively, you can
pass arguments encoded in the url, example:
http://someurl/collection/X?debug=3|ip:192.168.31.1|port:9999|stderr:0
@keyword ip: (str) the machine where the Pydev debugger is listening for incoming connections
@keyword port: (str) the port of the remote machine
@keyword suspend: (bool) whether to stop execution right after the debugger was activated
@keyword stderr: (bool) redirect the stderr to the remote debugging machine console
@keyword stdout: (bool) redirect the stdout to the remote debugging machine console
@keyword path: (str) list of mappings of <source> -> <target> paths separated by '#'
"""
# to see the translation
if CFG_PYDEV_DEBUG:
sys.stderr.write("We set the pydev to be verbose")
putils.DEBUG_CLIENT_SERVER_TRANSLATION = True
if hasattr(pydevd, "MYDEBUG"):
pydevd.MYDEBUG = False
i = ip or CFG_REMOTE_DEBUGGER_PYDEV_REMOTE_IP
p = port or CFG_REMOTE_DEBUGGER_PYDEV_REMOTE_PORT
_pydev_paths = None
    if hasattr(putils, 'PATHS_FROM_ECLIPSE_TO_PYTHON'):  # newer versions of Pydev
_pydev_paths = getattr(putils, 'PATHS_FROM_ECLIPSE_TO_PYTHON')
elif hasattr(putils, 'PATHS_FROM_CLIENT_TO_SERVER'): # pydev 1.5
_pydev_paths = getattr(putils, 'PATHS_FROM_CLIENT_TO_SERVER')
# Eclipse needs to know how to map the file from the remote server
if CFG_REMOTE_DEBUGGER_PYDEV_PATHS:
xpaths = map(lambda x: (normcase(x[0]), normcase(x[1])), CFG_REMOTE_DEBUGGER_PYDEV_PATHS)
for couple in xpaths:
if couple not in _pydev_paths:
_pydev_paths.append(couple)
# paths set through the url parameter
if path:
elements = path.split('#')
if len(elements) % 2 == 1:
elements.pop(-1)
i = 0
xpaths = []
while len(elements):
xpaths.append((normcase(elements.pop(0)), normcase(elements.pop(0))))
for couple in xpaths:
if couple not in _pydev_paths:
_pydev_paths.append(couple)
# the first argument is the IP of the (remote) machine where Eclipse Pydev
# is listening, we send suspend=False to not bother with stopping the code executing when
# pydev is initialized, set your own breakpoints inside Eclipse to stop execution
    # this is a HACK!!! we basically try to reconnect to another IP as requested on the url param;
    # I don't know whether it breaks pydev internals at some point
if (ip is not None) and hasattr(pydevd, 'oldxxxip') and pydevd.oldxxxip != ip:
pydevd.connected = False
pydevd.settrace(i,
stdoutToServer=stdout,
stderrToServer=stderr,
port=p,
suspend=suspend)
pydevd.oldxxxip = ip
if CFG_PYDEV_DEBUG:
sys.stderr.write("These are the mapping paths\n")
sys.stderr.write(str(_pydev_paths) + "\n")
| gpl-2.0 | 4,239,405,827,464,427,000 | 34.993197 | 104 | 0.63419 | false |
rodrigods/keystone | keystone/catalog/backends/sql.py | 1 | 12218 | # Copyright 2012 OpenStack Foundation
# Copyright 2012 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import sqlalchemy
from keystone import catalog
from keystone.catalog import core
from keystone.common import sql
from keystone import config
from keystone import exception
CONF = config.CONF
class Region(sql.ModelBase, sql.DictBase):
__tablename__ = 'region'
attributes = ['id', 'description', 'parent_region_id', 'url']
id = sql.Column(sql.String(64), primary_key=True)
description = sql.Column(sql.String(255), nullable=False)
url = sql.Column(sql.String(255), nullable=True)
# NOTE(jaypipes): Right now, using an adjacency list model for
# storing the hierarchy of regions is fine, since
# the API does not support any kind of querying for
# more complex hierarchical queries such as "get me only
# the regions that are subchildren of this region", etc.
# If, in the future, such queries are needed, then it
# would be possible to add in columns to this model for
# "left" and "right" and provide support for a nested set
# model.
parent_region_id = sql.Column(sql.String(64), nullable=True)
# TODO(jaypipes): I think it's absolutely stupid that every single model
# is required to have an "extra" column because of the
# DictBase in the keystone.common.sql.core module. Forcing
# tables to have pointless columns in the database is just
# bad. Remove all of this extra JSON blob stuff.
# See: https://bugs.launchpad.net/keystone/+bug/1265071
extra = sql.Column(sql.JsonBlob())
class Service(sql.ModelBase, sql.DictBase):
__tablename__ = 'service'
attributes = ['id', 'type', 'enabled']
id = sql.Column(sql.String(64), primary_key=True)
type = sql.Column(sql.String(255))
enabled = sql.Column(sql.Boolean, nullable=False, default=True,
server_default=sqlalchemy.sql.expression.true())
extra = sql.Column(sql.JsonBlob())
endpoints = sqlalchemy.orm.relationship("Endpoint", backref="service")
class Endpoint(sql.ModelBase, sql.DictBase):
__tablename__ = 'endpoint'
attributes = ['id', 'interface', 'region', 'service_id', 'url',
'legacy_endpoint_id', 'enabled']
id = sql.Column(sql.String(64), primary_key=True)
legacy_endpoint_id = sql.Column(sql.String(64))
interface = sql.Column(sql.String(8), nullable=False)
region = sql.Column(sql.String(255))
service_id = sql.Column(sql.String(64),
sql.ForeignKey('service.id'),
nullable=False)
url = sql.Column(sql.Text(), nullable=False)
enabled = sql.Column(sql.Boolean, nullable=False, default=True,
server_default=sqlalchemy.sql.expression.true())
extra = sql.Column(sql.JsonBlob())
class Catalog(catalog.Driver):
# Regions
def list_regions(self, hints):
session = sql.get_session()
regions = session.query(Region)
regions = sql.filter_limit_query(Region, regions, hints)
return [s.to_dict() for s in list(regions)]
def _get_region(self, session, region_id):
ref = session.query(Region).get(region_id)
if not ref:
raise exception.RegionNotFound(region_id=region_id)
return ref
def _delete_child_regions(self, session, region_id):
"""Delete all child regions.
Recursively delete any region that has the supplied region
as its parent.
"""
children = session.query(Region).filter_by(parent_region_id=region_id)
for child in children:
self._delete_child_regions(session, child.id)
session.delete(child)
def _check_parent_region(self, session, region_ref):
"""Raise a NotFound if the parent region does not exist.
If the region_ref has a specified parent_region_id, check that
the parent exists, otherwise, raise a NotFound.
"""
parent_region_id = region_ref.get('parent_region_id')
if parent_region_id is not None:
# This will raise NotFound if the parent doesn't exist,
# which is the behavior we want.
self._get_region(session, parent_region_id)
def get_region(self, region_id):
session = sql.get_session()
return self._get_region(session, region_id).to_dict()
def delete_region(self, region_id):
session = sql.get_session()
with session.begin():
ref = self._get_region(session, region_id)
self._delete_child_regions(session, region_id)
session.delete(ref)
@sql.handle_conflicts(conflict_type='region')
def create_region(self, region_ref):
session = sql.get_session()
with session.begin():
self._check_parent_region(session, region_ref)
region = Region.from_dict(region_ref)
session.add(region)
return region.to_dict()
def update_region(self, region_id, region_ref):
session = sql.get_session()
with session.begin():
self._check_parent_region(session, region_ref)
ref = self._get_region(session, region_id)
old_dict = ref.to_dict()
old_dict.update(region_ref)
new_region = Region.from_dict(old_dict)
for attr in Region.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_region, attr))
return ref.to_dict()
# Services
@sql.truncated
def list_services(self, hints):
session = sql.get_session()
services = session.query(Service)
services = sql.filter_limit_query(Service, services, hints)
return [s.to_dict() for s in list(services)]
def _get_service(self, session, service_id):
ref = session.query(Service).get(service_id)
if not ref:
raise exception.ServiceNotFound(service_id=service_id)
return ref
def get_service(self, service_id):
session = sql.get_session()
return self._get_service(session, service_id).to_dict()
def delete_service(self, service_id):
session = sql.get_session()
with session.begin():
ref = self._get_service(session, service_id)
session.query(Endpoint).filter_by(service_id=service_id).delete()
session.delete(ref)
def create_service(self, service_id, service_ref):
session = sql.get_session()
with session.begin():
service = Service.from_dict(service_ref)
session.add(service)
return service.to_dict()
def update_service(self, service_id, service_ref):
session = sql.get_session()
with session.begin():
ref = self._get_service(session, service_id)
old_dict = ref.to_dict()
old_dict.update(service_ref)
new_service = Service.from_dict(old_dict)
for attr in Service.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_service, attr))
ref.extra = new_service.extra
return ref.to_dict()
# Endpoints
def create_endpoint(self, endpoint_id, endpoint_ref):
session = sql.get_session()
self.get_service(endpoint_ref['service_id'])
new_endpoint = Endpoint.from_dict(endpoint_ref)
with session.begin():
session.add(new_endpoint)
return new_endpoint.to_dict()
def delete_endpoint(self, endpoint_id):
session = sql.get_session()
with session.begin():
ref = self._get_endpoint(session, endpoint_id)
session.delete(ref)
def _get_endpoint(self, session, endpoint_id):
try:
return session.query(Endpoint).filter_by(id=endpoint_id).one()
except sql.NotFound:
raise exception.EndpointNotFound(endpoint_id=endpoint_id)
def get_endpoint(self, endpoint_id):
session = sql.get_session()
return self._get_endpoint(session, endpoint_id).to_dict()
@sql.truncated
def list_endpoints(self, hints):
session = sql.get_session()
endpoints = session.query(Endpoint)
endpoints = sql.filter_limit_query(Endpoint, endpoints, hints)
return [e.to_dict() for e in list(endpoints)]
def update_endpoint(self, endpoint_id, endpoint_ref):
session = sql.get_session()
with session.begin():
ref = self._get_endpoint(session, endpoint_id)
old_dict = ref.to_dict()
old_dict.update(endpoint_ref)
new_endpoint = Endpoint.from_dict(old_dict)
for attr in Endpoint.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_endpoint, attr))
ref.extra = new_endpoint.extra
return ref.to_dict()
def get_catalog(self, user_id, tenant_id, metadata=None):
substitutions = dict(six.iteritems(CONF))
substitutions.update({'tenant_id': tenant_id, 'user_id': user_id})
session = sql.get_session()
t = True # variable for singleton for PEP8, E712.
endpoints = (session.query(Endpoint).
options(sql.joinedload(Endpoint.service)).
filter(Endpoint.enabled == t).all())
catalog = {}
for endpoint in endpoints:
if not endpoint.service['enabled']:
continue
try:
url = core.format_url(endpoint['url'], substitutions)
except exception.MalformedEndpoint:
continue # this failure is already logged in format_url()
region = endpoint['region']
service_type = endpoint.service['type']
default_service = {
'id': endpoint['id'],
'name': endpoint.service['name'],
'publicURL': ''
}
catalog.setdefault(region, {})
catalog[region].setdefault(service_type, default_service)
interface_url = '%sURL' % endpoint['interface']
catalog[region][service_type][interface_url] = url
return catalog
def get_v3_catalog(self, user_id, tenant_id, metadata=None):
d = dict(six.iteritems(CONF))
d.update({'tenant_id': tenant_id,
'user_id': user_id})
session = sql.get_session()
t = True # variable for singleton for PEP8, E712.
services = (session.query(Service).filter(Service.enabled == t).
options(sql.joinedload(Service.endpoints)).
all())
def make_v3_endpoints(endpoints):
for endpoint in (ep.to_dict() for ep in endpoints if ep.enabled):
del endpoint['service_id']
del endpoint['legacy_endpoint_id']
del endpoint['enabled']
try:
endpoint['url'] = core.format_url(endpoint['url'], d)
except exception.MalformedEndpoint:
continue # this failure is already logged in format_url()
yield endpoint
def make_v3_service(svc):
eps = list(make_v3_endpoints(svc.endpoints))
service = {'endpoints': eps, 'id': svc.id, 'type': svc.type}
name = svc.extra.get('name')
if name:
service['name'] = name
return service
return [make_v3_service(svc) for svc in services]
| apache-2.0 | 2,021,438,495,803,324,000 | 38.540453 | 78 | 0.600589 | false |
GeorgePlukov/FloodWatch | XBee-2.2.3/xbee/tests/test_zigbee.py | 27 | 9957 | """
test_zigbee.py
By Paul Malmsten, 2010
[email protected]
Tests the XBee ZB (ZigBee) implementation class for API compliance
"""
import unittest
from xbee.zigbee import ZigBee
class TestZigBee(unittest.TestCase):
"""
Tests ZigBee-specific features
"""
def setUp(self):
self.zigbee = ZigBee(None)
def test_null_terminated_field(self):
"""
Packets with null-terminated fields
should be properly parsed
"""
expected_data = b'\x01\x02\x03\x04'
terminator = b'\x00'
node_identifier = b'\x95' + b'\x00' * 21 + expected_data + terminator + b'\x00' * 8
data = self.zigbee._split_response(node_identifier)
self.assertEqual(data['node_id'], expected_data)
def test_split_node_identification_identifier(self):
data = b'\x95\x00\x13\xa2\x00\x40\x52\x2b\xaa\x7d\x84\x02\x7d\x84\x00\x13\xa2\x00\x40\x52\x2b\xaa\x20\x00\xff\xfe\x01\x01\xc1\x05\x10\x1e'
info = self.zigbee._split_response(data)
expected_info = {
'id': 'node_id_indicator',
'sender_addr_long': b'\x00\x13\xa2\x00\x40\x52\x2b\xaa',
'sender_addr': b'\x7d\x84',
'options': b'\x02',
'source_addr': b'\x7d\x84',
'source_addr_long': b'\x00\x13\xa2\x00\x40\x52\x2b\xaa',
'node_id': b' ',
'parent_source_addr': b'\xff\xfe',
'device_type': b'\x01',
'source_event': b'\x01',
'digi_profile_id': b'\xc1\x05',
'manufacturer_id': b'\x10\x1e',
}
self.assertEqual(info, expected_info)
def test_split_node_identification_identifier2(self):
data = b'\x95\x00\x13\xa2\x00\x40\x52\x2b\xaa\x7d\x84\x02\x7d\x84\x00\x13\xa2\x00\x40\x52\x2b\xaaCoordinator\x00\xff\xfe\x01\x01\xc1\x05\x10\x1e'
info = self.zigbee._split_response(data)
expected_info = {
'id': 'node_id_indicator',
'sender_addr_long': b'\x00\x13\xa2\x00\x40\x52\x2b\xaa',
'sender_addr': b'\x7d\x84',
'options': b'\x02',
'source_addr': b'\x7d\x84',
'source_addr_long': b'\x00\x13\xa2\x00\x40\x52\x2b\xaa',
'node_id': b'Coordinator',
'parent_source_addr': b'\xff\xfe',
'device_type': b'\x01',
'source_event': b'\x01',
'digi_profile_id': b'\xc1\x05',
'manufacturer_id': b'\x10\x1e',
}
self.assertEqual(info, expected_info)
def test_is_remote_at_response_parameter_parsed_as_io_samples(self):
"""
A remote AT command of IS, to take a sample immediately and respond
with the results, must be appropriately parsed for IO data.
"""
data = b'\x97A\x00\x13\xa2\x00@oG\xe4v\x1aIS\x00\x01\x1c\xc0\x06\x18\x00\x02\x8c\x03\x96'
info = self.zigbee._split_response(data)
expected_info = {
'id': 'remote_at_response',
'frame_id': b'A',
'source_addr_long': b'\x00\x13\xa2\x00@oG\xe4',
'source_addr': b'v\x1a',
'command': b'IS',
'status': b'\x00',
'parameter': [{'adc-1': 652,
'adc-2': 918,
'dio-10': False,
'dio-11': True,
'dio-12': True,
'dio-6': False,
'dio-7': False
}]
}
self.assertEqual(info, expected_info)
def test_lowercase_is_remote_at_response_parameter_parsed_as_io_samples(self):
"""
A remote AT command of lowercase is, to take a sample immediately and respond
with the results, must be appropriately parsed for IO data.
"""
data = b'\x97A\x00\x13\xa2\x00@oG\xe4v\x1ais\x00\x01\x1c\xc0\x06\x18\x00\x02\x8c\x03\x96'
info = self.zigbee._split_response(data)
expected_info = {
'id': 'remote_at_response',
'frame_id': b'A',
'source_addr_long': b'\x00\x13\xa2\x00@oG\xe4',
'source_addr': b'v\x1a',
'command': b'is',
'status': b'\x00',
'parameter': [{'adc-1': 652,
'adc-2': 918,
'dio-10': False,
'dio-11': True,
'dio-12': True,
'dio-6': False,
'dio-7': False
}]
}
self.assertEqual(info, expected_info)
def test_parsing_may_encounter_field_which_does_not_exist(self):
"""
Some fields are optional and may not exist; parsing should not crash
if/when they are not available.
"""
data = b'\x97A\x00\x13\xa2\x00@oG\xe4v\x1aIS\x01'
info = self.zigbee._split_response(data)
expected_info = {
'id': 'remote_at_response',
'frame_id': b'A',
'source_addr_long': b'\x00\x13\xa2\x00@oG\xe4',
'source_addr': b'v\x1a',
'command': b'IS',
'status': b'\x01',
}
self.assertEqual(info, expected_info)
def test_nd_at_response_parameter_parsed(self):
"""
An at_response for an ND command must be parsed.
"""
data = b'\x88AND\x00v\x1a\x00\x13\xa2\x00@oG\xe4ENDPOINT-1\x00\xff\xfe\x01\x00\xc1\x05\x10\x1e'
info = self.zigbee._split_response(data)
expected_info = {
'id': 'at_response',
'frame_id': b'A',
'command': b'ND',
'status': b'\x00',
'parameter': {'source_addr': b'\x76\x1a',
'source_addr_long': b'\x00\x13\xa2\x00\x40\x6f\x47\xe4',
'node_identifier': b'ENDPOINT-1',
'parent_address': b'\xff\xfe',
'device_type': b'\x01',
'status': b'\x00',
'profile_id': b'\xc1\x05',
'manufacturer': b'\x10\x1e',
}
}
self.assertEqual(info, expected_info)
def test_lowercase_nd_at_response_parameter_parsed(self):
"""
An at_response for a lowercase nd command must be parsed.
"""
data = b'\x88And\x00v\x1a\x00\x13\xa2\x00@oG\xe4ENDPOINT-1\x00\xff\xfe\x01\x00\xc1\x05\x10\x1e'
info = self.zigbee._split_response(data)
expected_info = {
'id': 'at_response',
'frame_id': b'A',
'command': b'nd',
'status': b'\x00',
'parameter': {'source_addr': b'\x76\x1a',
'source_addr_long': b'\x00\x13\xa2\x00\x40\x6f\x47\xe4',
'node_identifier': b'ENDPOINT-1',
'parent_address': b'\xff\xfe',
'device_type': b'\x01',
'status': b'\x00',
'profile_id': b'\xc1\x05',
'manufacturer': b'\x10\x1e',
}
}
self.assertEqual(info, expected_info)
class TestParseZigBeeIOData(unittest.TestCase):
"""
Test parsing ZigBee specific IO data
"""
def setUp(self):
self.zigbee = ZigBee(None)
def test_parse_dio_adc(self):
data = b'\x01\x08\x00\x0e\x08\x00\x00\x00\x02P\x02\x06'
expected_results = [{'dio-11': True,
'adc-1': 0,
'adc-2': 592,
'adc-3': 518}]
results = self.zigbee._parse_samples(data)
self.assertEqual(results, expected_results)
def test_parse_samples_ticket_44(self):
"""
This example is from ticket 44 on Google Code:
https://code.google.com/p/python-xbee/issues/detail?id=44
The author claims the given data is generated by an
Xbee Pro 900HP module, but I could only find a definition
        for a packet with a response type of 0x92 in the XBee ZB
specification.
"""
data = (b'\x01' + # Number of samples
b'\x10\x00' + # Digital I/O mask (CD/DIO12 enabled)
b'\x0E' + # ADC 1,2,3 enabled
b'\x10\x00' + # DIO12 is high
b'\x03\xA4' + # ADC1 = 932
b'\x01\x31' + # ADC2 = 305
b'\x03\x31') # ADC3 = 817
expected_results = [{'dio-12': True,
'adc-1': 932,
'adc-2': 305,
'adc-3': 817}]
results = self.zigbee._parse_samples(data)
self.assertEqual(results, expected_results)
def test_parse_dio_adc_supply_voltage_not_clamped(self):
"""
When bit 7 on the ADC mask is set, the supply voltage is included
in the ADC I/O sample section. This sample may exceed 10 bits of
precision, even though all other ADC channels are limited to a
range of 0-1.2v with 10 bits of precision. I assume that a voltage
        divider and the firmware are used internally to compute the actual
Vcc voltage.
Therefore, the I/O sampling routine must not clamp this ADC
channel to 10 bits of precision.
"""
data = b'\x01\x00\x00\x80\x0D\x18'
expected_results = [{'adc-7':0xD18}]
results = self.zigbee._parse_samples(data)
self.assertEqual(results, expected_results)
| mit | 7,955,033,661,075,280,000 | 39.149194 | 157 | 0.487998 | false |
megamanfx/grit-i18n | grit/format/policy_templates/writers/android_policy_writer.py | 17 | 3492 | #!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from grit.format.policy_templates.writers import xml_formatted_writer
from xml.dom import minidom
from xml.sax import saxutils as xml_escape
def GetWriter(config):
'''Factory method for creating AndroidPolicyWriter objects.
See the constructor of TemplateWriter for description of
arguments.
'''
return AndroidPolicyWriter(['android'], config)
def _EscapeResource(resource):
'''Escape the resource for usage in an Android resource XML file.
This includes standard XML escaping as well as those specific to Android.
'''
if type(resource) == int:
return str(resource)
return xml_escape.escape(resource, {"'": "\\'", '"': '\\"', '\\': '\\\\'})
class AndroidPolicyWriter(xml_formatted_writer.XMLFormattedWriter):
'''Outputs localized Android Resource XML files.
The policy strings are localized and exposed as string resources for
consumption through Android's App restriction Schema.
'''
# DOM root node of the generated XML document.
_doc = None
# The resources node contains all resource 'string' and 'string-array'
# elements.
_resources = None
def AddStringResource(self, name, string):
'''Add a string resource of the given name.
'''
string_node = self._doc.createElement('string')
string_node.setAttribute('name', name)
string_node.appendChild(self._doc.createTextNode(_EscapeResource(string)))
self._resources.appendChild(string_node)
def AddStringArrayResource(self, name, string_items):
'''Add a string-array resource of the given name and
elements from string_items.
'''
string_array_node = self._doc.createElement('string-array')
string_array_node.setAttribute('name', name)
self._resources.appendChild(string_array_node)
for item in string_items:
string_node = self._doc.createElement('item')
string_node.appendChild(self._doc.createTextNode(_EscapeResource(item)))
string_array_node.appendChild(string_node)
def PreprocessPolicies(self, policy_list):
return self.FlattenGroupsAndSortPolicies(policy_list)
def CanBeRecommended(self, policy):
return False
def IsDeprecatedPolicySupported(self, policy):
return True
def IsFuturePolicySupported(self, policy):
return True
def WritePolicy(self, policy):
name = policy['name']
self.AddStringResource(name + 'Title', policy['caption'])
# Get the first line of the policy description.
description = policy['desc'].split('\n', 1)[0]
self.AddStringResource(name + 'Desc', description)
items = policy.get('items')
if items is not None:
entries = [ item['caption'] for item in items ]
values = [ item['value'] for item in items ]
self.AddStringArrayResource(name + 'Entries', entries)
self.AddStringArrayResource(name + 'Values', values)
def BeginTemplate(self):
comment_text = 'DO NOT MODIFY THIS FILE DIRECTLY!\n' \
'IT IS GENERATED FROM policy_templates.json.'
comment_node = self._doc.createComment(comment_text)
self._doc.insertBefore(comment_node, self._resources)
def Init(self):
impl = minidom.getDOMImplementation()
self._doc = impl.createDocument(None, 'resources', None)
self._resources = self._doc.documentElement
def GetTemplateText(self):
return self.ToPrettyXml(self._doc)
| bsd-2-clause | 4,386,340,638,288,412,000 | 33.92 | 78 | 0.712199 | false |
lduarte1991/edx-platform | common/djangoapps/terrain/stubs/tests/test_youtube_stub.py | 172 | 2639 | """
Unit test for stub YouTube implementation.
"""
import unittest
import requests
from ..youtube import StubYouTubeService
class StubYouTubeServiceTest(unittest.TestCase):
def setUp(self):
super(StubYouTubeServiceTest, self).setUp()
self.server = StubYouTubeService()
self.url = "http://127.0.0.1:{0}/".format(self.server.port)
self.server.config['time_to_response'] = 0.0
self.addCleanup(self.server.shutdown)
def test_unused_url(self):
response = requests.get(self.url + 'unused_url')
self.assertEqual("Unused url", response.content)
@unittest.skip('Failing intermittently due to inconsistent responses from YT. See TE-871')
def test_video_url(self):
response = requests.get(
self.url + 'test_youtube/OEoXaMPEzfM?v=2&alt=jsonc&callback=callback_func'
)
# YouTube metadata for video `OEoXaMPEzfM` states that duration is 116.
self.assertEqual(
'callback_func({"data": {"duration": 116, "message": "I\'m youtube.", "id": "OEoXaMPEzfM"}})',
response.content
)
def test_transcript_url_equal(self):
response = requests.get(
self.url + 'test_transcripts_youtube/t__eq_exist'
)
self.assertEqual(
"".join([
'<?xml version="1.0" encoding="utf-8" ?>',
'<transcript><text start="1.0" dur="1.0">',
'Equal transcripts</text></transcript>'
]), response.content
)
def test_transcript_url_not_equal(self):
response = requests.get(
self.url + 'test_transcripts_youtube/t_neq_exist',
)
self.assertEqual(
"".join([
'<?xml version="1.0" encoding="utf-8" ?>',
'<transcript><text start="1.1" dur="5.5">',
'Transcripts sample, different that on server',
'</text></transcript>'
]), response.content
)
def test_transcript_not_found(self):
response = requests.get(self.url + 'test_transcripts_youtube/some_id')
self.assertEqual(404, response.status_code)
def test_reset_configuration(self):
reset_config_url = self.url + 'del_config'
# add some configuration data
self.server.config['test_reset'] = 'This is a reset config test'
# reset server configuration
response = requests.delete(reset_config_url)
self.assertEqual(response.status_code, 200)
# ensure that server config dict is empty after successful reset
self.assertEqual(self.server.config, {})
| agpl-3.0 | -6,676,355,011,518,009,000 | 32.833333 | 106 | 0.598712 | false |
gregoil/rotest | src/rotest/management/common/messages.py | 1 | 6686 | """Holds the common resource management messages."""
from __future__ import absolute_import
from abc import ABCMeta
from basicstruct import BasicStruct
import six
def slots_extender(new_slots):
"""Extender decorator to add new slots to the wrapped class.
Arguments:
new_slots (tuple): new slots names.
Returns:
func. a method that decorate a class.
"""
def decorator(origin_class):
"""Decorate a class and add the given slots to it.
        Actually, it creates a new class that derives from the given class and
        adds the new slots to it; it also copies the documentation.
Arguments:
origin_class (class): the class to be wrapped.
Returns:
class. the new class.
"""
new_class = type(origin_class.__name__, (origin_class,), {})
new_class.__slots__ = origin_class.__slots__ + new_slots
new_class.__doc__ = origin_class.__doc__
return new_class
return decorator
@slots_extender(('msg_id',))
class AbstractMessage(six.with_metaclass(ABCMeta, BasicStruct)):
"""Basic message class.
Holds the common data for resource management messages.
Attributes:
msg_id (number): sending side unique message identifier.
"""
@slots_extender(('reason',))
class ParsingFailure(AbstractMessage):
"""Reply message on a request that failed to parse."""
pass
@slots_extender(('request_id',))
class AbstractReply(six.with_metaclass(ABCMeta, AbstractMessage)):
"""Abstract reply message for parsed request.
Attributes:
request_id (number): msg_id of the requested operation.
"""
class SuccessReply(AbstractReply):
"""Success reply message, answer on successful request."""
pass
@slots_extender(('should_skip',))
class ShouldSkipReply(AbstractReply):
"""Reply message to the 'should_skip' remote query."""
pass
@slots_extender(('code', 'content'))
class ErrorReply(AbstractReply):
"""Error reply message, answer on unsuccessful request.
Attributes:
code (number): error code.
content (str): content describing the failure.
"""
pass
@slots_extender(('resources',))
class ResourcesReply(AbstractReply):
"""Resources reply message.
Sent as an answer to a successful 'LockResources' request.
Attributes:
resources (list): list of
:class:'rotest.common.models.base_resource.BaseResource'.
"""
pass
@slots_extender(('descriptors',))
class QueryResources(AbstractMessage):
"""Query resources request message.
Attributes:
        descriptors (dict): descriptors of the resources to query, in the format
{'type': resource_type_name, 'properties': {'key': value}}
timeout (number): seconds to wait for resources if they're unavailable.
"""
pass
@slots_extender(('descriptors', 'timeout'))
class LockResources(AbstractMessage):
"""Lock resources request message.
Attributes:
descriptors (list): descriptors of resources. list of dictionaries of
{'type': resource_type_name, 'properties': {'key': value}}
timeout (number): seconds to wait for resources if they're unavailable.
"""
pass
@slots_extender(('requests',))
class ReleaseResources(AbstractMessage):
"""Release resources request message.
Attributes:
requests (list): list of resources names.
"""
pass
@slots_extender(('user_name',))
class CleanupUser(AbstractMessage):
"""Clean user's resources request message.
Attributes:
user_name (str): name of the user to be cleaned.
"""
pass
@slots_extender(('tests', 'run_data'))
class StartTestRun(AbstractMessage):
"""Start the run of the test message.
Attributes:
tests (dict): structure and data of the tests to run.
run_data (dict): additional data relevant to the run.
"""
pass
class RunFinished(AbstractMessage):
"""Signals the end of the run.
Note:
        This message is used in the multiprocess runner to inform the manager of
the end of a worker's run.
"""
pass
@slots_extender(('run_data',))
class UpdateRunData(AbstractMessage):
"""Update the run data message.
Attributes:
run_data (dict): run data fields and values.
"""
pass
@slots_extender(('model', 'filter', 'kwargs'))
class UpdateFields(AbstractMessage):
"""Request to update content in the server's DB.
Attributes:
model (type): Django model to apply changes on.
filter (dict): arguments to filter by.
kwargs (dict): changes to apply on the filtered instances.
"""
pass
@slots_extender(('test_id',))
class AbstractTestEventMessage(AbstractMessage):
"""Abstract test event message.
Attributes:
test_id (number): identifier of the test.
"""
pass
class StartTest(AbstractTestEventMessage):
"""Start the run of a test message."""
pass
class SetupFinished(AbstractTestEventMessage):
"""Finished the setup of a test message."""
pass
class StartTeardown(AbstractTestEventMessage):
"""Start the teardown of a test message."""
pass
class ShouldSkip(AbstractTestEventMessage):
"""Check if the test should be skipped message."""
pass
class StopTest(AbstractTestEventMessage):
"""End the run of a test message."""
pass
@slots_extender(('resources',))
class UpdateResources(AbstractTestEventMessage):
"""Update the resources list of the test's locked resources.
Attributes:
resources (list): list of resource descriptor of the test.
"""
pass
@slots_extender(('resources',))
class CloneResources(AbstractTestEventMessage):
"""Update the resources list of the test's locked resources.
Attributes:
resources (dict): dict of the locked resources of the test.
Note:
        This message is used in the multiprocess runner to inform the manager of
the test's 'locked_resources' dict content.
"""
pass
class StartComposite(AbstractTestEventMessage):
"""Start the run of a composite test message."""
pass
class StopComposite(AbstractTestEventMessage):
"""End the run of a composite test message."""
pass
@slots_extender(('code', 'info'))
class AddResult(AbstractTestEventMessage):
"""Update a test result message.
Attributes:
code (number): TestOutcome result code.
info (str): additional data about the result (traceback, reason, etc.).
"""
pass
@slots_extender(('info',))
class AddInfo(AbstractTestEventMessage):
"""Register a success message.
Attributes:
info (str): success message.
"""
pass
| mit | -3,526,486,222,228,703,700 | 23.490842 | 79 | 0.664523 | false |
totalspectrum/binutils-propeller | gdb/testsuite/gdb.perf/lib/perftest/reporter.py | 7 | 2912 | # Copyright (C) 2013-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Text reports are written here.
# This is the perftest counterpart to gdb.sum.
SUM_FILE_NAME = "perftest.sum"
# Raw data that went into the report is written here.
# This is the perftest counterpart to gdb.log.
LOG_FILE_NAME = "perftest.log"
class Reporter(object):
"""Base class of reporter to report test results in a certain format.
    A subclass, which is specific to a report format, should override the
    report, start and end methods.
"""
def __init__(self, append):
"""Constructor of Reporter.
        The append attribute is used to determine whether to append to or
        overwrite the log file.
"""
self.append = append
def report(self, *args):
raise NotImplementedError("Abstract Method:report.")
def start(self):
"""Invoked when reporting is started."""
raise NotImplementedError("Abstract Method:start.")
def end(self):
"""Invoked when reporting is done.
It must be overridden to do some cleanups, such as closing file
descriptors.
"""
raise NotImplementedError("Abstract Method:end.")
class TextReporter(Reporter):
"""Report results in a plain text file 'perftest.log'."""
def __init__(self, append):
        super(TextReporter, self).__init__(append)
self.txt_sum = None
self.txt_log = None
def report(self, test_name, measurement_name, data_points):
if len(data_points) == 0:
self.txt_sum.write("%s %s *no data recorded*\n" % (
test_name, measurement_name))
return
average = sum(data_points) / len(data_points)
data_min = min(data_points)
data_max = max(data_points)
self.txt_sum.write("%s %s %s\n" % (
test_name, measurement_name, average))
self.txt_log.write("%s %s %s, min %s, max %s, data %s\n" % (
test_name, measurement_name, average, data_min, data_max,
data_points))
def start(self):
mode = "a+" if self.append else "w"
        self.txt_sum = open(SUM_FILE_NAME, mode)
        self.txt_log = open(LOG_FILE_NAME, mode)
def end(self):
        self.txt_sum.close()
        self.txt_log.close()
| gpl-2.0 | -8,975,248,715,284,600,000 | 33.258824 | 73 | 0.644231 | false |
nitzmahone/ansible | lib/ansible/module_utils/aws/waf.py | 71 | 7415 | # Copyright (c) 2017 Will Thames
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for Web Application Firewall modules
"""
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
from ansible.module_utils.aws.waiters import get_waiter
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
MATCH_LOOKUP = {
'byte': {
'method': 'byte_match_set',
'conditionset': 'ByteMatchSet',
'conditiontuple': 'ByteMatchTuple',
'type': 'ByteMatch'
},
'geo': {
'method': 'geo_match_set',
'conditionset': 'GeoMatchSet',
'conditiontuple': 'GeoMatchConstraint',
'type': 'GeoMatch'
},
'ip': {
'method': 'ip_set',
'conditionset': 'IPSet',
'conditiontuple': 'IPSetDescriptor',
'type': 'IPMatch'
},
'regex': {
'method': 'regex_match_set',
'conditionset': 'RegexMatchSet',
'conditiontuple': 'RegexMatchTuple',
'type': 'RegexMatch'
},
'size': {
'method': 'size_constraint_set',
'conditionset': 'SizeConstraintSet',
'conditiontuple': 'SizeConstraint',
'type': 'SizeConstraint'
},
'sql': {
'method': 'sql_injection_match_set',
'conditionset': 'SqlInjectionMatchSet',
'conditiontuple': 'SqlInjectionMatchTuple',
'type': 'SqlInjectionMatch',
},
'xss': {
'method': 'xss_match_set',
'conditionset': 'XssMatchSet',
'conditiontuple': 'XssMatchTuple',
'type': 'XssMatch'
},
}
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_rule_with_backoff(client, rule_id):
return client.get_rule(RuleId=rule_id)['Rule']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_byte_match_set_with_backoff(client, byte_match_set_id):
return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)['ByteMatchSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_ip_set_with_backoff(client, ip_set_id):
return client.get_ip_set(IPSetId=ip_set_id)['IPSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_size_constraint_set_with_backoff(client, size_constraint_set_id):
return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)['SizeConstraintSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id):
return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)['SqlInjectionMatchSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_xss_match_set_with_backoff(client, xss_match_set_id):
return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)['XssMatchSet']
def get_rule(client, module, rule_id):
try:
rule = get_rule_with_backoff(client, rule_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain waf rule")
match_sets = {
'ByteMatch': get_byte_match_set_with_backoff,
'IPMatch': get_ip_set_with_backoff,
'SizeConstraint': get_size_constraint_set_with_backoff,
'SqlInjectionMatch': get_sql_injection_match_set_with_backoff,
'XssMatch': get_xss_match_set_with_backoff
}
if 'Predicates' in rule:
for predicate in rule['Predicates']:
if predicate['Type'] in match_sets:
predicate.update(match_sets[predicate['Type']](client, predicate['DataId']))
# replaced by Id from the relevant MatchSet
del(predicate['DataId'])
return rule
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_web_acl_with_backoff(client, web_acl_id):
return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
def get_web_acl(client, module, web_acl_id):
try:
web_acl = get_web_acl_with_backoff(client, web_acl_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain web acl")
if web_acl:
try:
for rule in web_acl['Rules']:
rule.update(get_rule(client, module, rule['RuleId']))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain web acl rule")
return camel_dict_to_snake_dict(web_acl)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_rules_with_backoff(client):
paginator = client.get_paginator('list_rules')
return paginator.paginate().build_full_result()['Rules']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_web_acls_with_backoff(client):
paginator = client.get_paginator('list_web_acls')
return paginator.paginate().build_full_result()['WebACLs']
def list_web_acls(client, module):
try:
return list_web_acls_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain web acls")
def get_change_token(client, module):
try:
token = client.get_change_token()
return token['ChangeToken']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain change token")
@AWSRetry.backoff(tries=10, delay=2, backoff=2.0, catch_extra_error_codes=['WAFStaleDataException'])
def run_func_with_change_token_backoff(client, module, params, func, wait=False):
params['ChangeToken'] = get_change_token(client, module)
result = func(**params)
if wait:
get_waiter(
client, 'change_token_in_sync',
).wait(
ChangeToken=result['ChangeToken']
)
return result
| gpl-3.0 | -1,828,308,143,459,802,600 | 36.831633 | 120 | 0.685233 | false |
werbk/task-6.19 | conftest.py | 1 | 2403 | import pytest
import logging
import json
import jsonpickle
import os.path
import importlib
from fixture.TestBase import BaseClass
from fixture.variables import UserLogin
fixture = None
target = None
@pytest.fixture
def app(request):
global fixture
global target
browser = request.config.getoption('--browser')
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), request.config.getoption('--target'))
with open(config_file) as file:
target = json.load(file)
#url = request.config.getoption('--baseUrl')
#login_user = request.config.getoption('--login_user')
#login_password = request.config.getoption('--login_password')
if fixture is None or not fixture.is_valid():
fixture = BaseClass(browser=browser, base_url=target['baseUrl'])
fixture.session.ensure_login(user_name=target['username'], password=target['password'])
return fixture
@pytest.fixture(scope='session', autouse=True)
def stop(request):
def fin():
fixture.session.ensure_logout()
fixture.restore()
request.addfinalizer(fin)
return fixture
def pytest_addoption(parser):
default_login_user = [UserLogin.name, UserLogin.password]
parser.addoption('--browser', action='store', default='firefox')
parser.addoption('--target', action='store', default='target.json') #'http://localhost/addressbook/')
    # I believe it is possible to do this in one line, but I don't know how to pass both login parameters in a single option
#parser.addoption('--loginu', action='store', default=default_login_user[0])
#parser.addoption('--loginp', action='store', default=default_login_user[1])
def load_from_module(module):
return importlib.import_module("data.%s" % module).test_data
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/%s.json' % file)) as f:
return jsonpickle.decode(f.read())
def pytest_generate_tests(metafunc):
for fixture in metafunc.fixturenames:
if fixture.startswith("data_"):
testdata = load_from_module(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
elif fixture.startswith("json_"):
testdata = load_from_json(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
| apache-2.0 | -698,316,811,076,072,600 | 32.375 | 116 | 0.685393 | false |
gerrive/horizon | openstack_dashboard/test/test_data/heat_data.py | 6 | 15719 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from heatclient.v1 import resource_types
from heatclient.v1 import resources
from heatclient.v1 import services
from heatclient.v1 import stacks
from openstack_dashboard.test.test_data import utils
# suppress warnings about our use of object comparisons in heatclient
logging.getLogger('heatclient.openstack.common.apiclient.base') \
.setLevel('ERROR')
# A slightly hacked up copy of a sample cloudformation template for testing.
TEMPLATE = """
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template.",
"Parameters": {
"KeyName": {
"Description": "Name of an EC2 Key Pair to enable SSH access to the instances",
"Type": "String"
},
"InstanceType": {
"Description": "WebServer EC2 instance type",
"Type": "String",
"Default": "m1.small",
"AllowedValues": [
"m1.tiny",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge"
],
"ConstraintDescription": "must be a valid EC2 instance type."
},
"DBName": {
"Default": "wordpress",
"Description": "The WordPress database name",
"Type": "String",
"MinLength": "1",
"MaxLength": "64",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"ConstraintDescription": "must begin with a letter and..."
},
"DBUsername": {
"Default": "admin",
"NoEcho": "true",
"Description": "The WordPress database admin account username",
"Type": "String",
"MinLength": "1",
"MaxLength": "16",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"ConstraintDescription": "must begin with a letter and..."
},
"DBPassword": {
"Default": "admin",
"NoEcho": "true",
"Description": "The WordPress database admin account password",
"Type": "String",
"MinLength": "1",
"MaxLength": "41",
"AllowedPattern": "[a-zA-Z0-9]*",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"DBRootPassword": {
"Default": "admin",
"NoEcho": "true",
"Description": "Root password for MySQL",
"Type": "String",
"MinLength": "1",
"MaxLength": "41",
"AllowedPattern": "[a-zA-Z0-9]*",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"LinuxDistribution": {
"Default": "F17",
"Description": "Distribution of choice",
"Type": "String",
"AllowedValues": [
"F18",
"F17",
"U10",
"RHEL-6.1",
"RHEL-6.2",
"RHEL-6.3"
]
},
"Network": {
"Type": "String",
"CustomConstraint": "neutron.network"
}
},
"Mappings": {
"AWSInstanceType2Arch": {
"m1.tiny": {
"Arch": "32"
},
"m1.small": {
"Arch": "64"
},
"m1.medium": {
"Arch": "64"
},
"m1.large": {
"Arch": "64"
},
"m1.xlarge": {
"Arch": "64"
}
},
"DistroArch2AMI": {
"F18": {
"32": "F18-i386-cfntools",
"64": "F18-x86_64-cfntools"
},
"F17": {
"32": "F17-i386-cfntools",
"64": "F17-x86_64-cfntools"
},
"U10": {
"32": "U10-i386-cfntools",
"64": "U10-x86_64-cfntools"
},
"RHEL-6.1": {
"32": "rhel61-i386-cfntools",
"64": "rhel61-x86_64-cfntools"
},
"RHEL-6.2": {
"32": "rhel62-i386-cfntools",
"64": "rhel62-x86_64-cfntools"
},
"RHEL-6.3": {
"32": "rhel63-i386-cfntools",
"64": "rhel63-x86_64-cfntools"
}
}
},
"Resources": {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Metadata": {
"AWS::CloudFormation::Init": {
"config": {
"packages": {
"yum": {
"mysql": [],
"mysql-server": [],
"httpd": [],
"wordpress": []
}
},
"services": {
"systemd": {
"mysqld": {
"enabled": "true",
"ensureRunning": "true"
},
"httpd": {
"enabled": "true",
"ensureRunning": "true"
}
}
}
}
}
},
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"DistroArch2AMI",
{
"Ref": "LinuxDistribution"
},
{
"Fn::FindInMap": [
"AWSInstanceType2Arch",
{
"Ref": "InstanceType"
},
"Arch"
]
}
]
},
"InstanceType": {
"Ref": "InstanceType"
},
"KeyName": {
"Ref": "KeyName"
},
"UserData": {
"Fn::Base64": {
"Fn::Join": [
"",
[
"#!/bin/bash -v\\n",
"/opt/aws/bin/cfn-init\\n"
]
]
}
}
}
}
},
"Outputs": {
"WebsiteURL": {
"Value": {
"Fn::Join": [
"",
[
"http://",
{
"Fn::GetAtt": [
"WikiDatabase",
"PublicIp"
]
},
"/wordpress"
]
]
},
"Description": "URL for Wordpress wiki"
}
}
}
"""
VALIDATE = """
{
"Description": "AWS CloudFormation Sample Template.",
"Parameters": {
"DBUsername": {
"Type": "String",
"Description": "The WordPress database admin account username",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "16",
"ConstraintDescription": "must begin with a letter and..."
},
"LinuxDistribution": {
"Default": "F17",
"Type": "String",
"Description": "Distribution of choice",
"AllowedValues": [
"F18",
"F17",
"U10",
"RHEL-6.1",
"RHEL-6.2",
"RHEL-6.3"
]
},
"DBRootPassword": {
"Type": "String",
"Description": "Root password for MySQL",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "41",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"KeyName": {
"Type": "String",
"Description": "Name of an EC2 Key Pair to enable SSH access to the instances"
},
"DBName": {
"Type": "String",
"Description": "The WordPress database name",
"Default": "wordpress",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"MaxLength": "64",
"ConstraintDescription": "must begin with a letter and..."
},
"DBPassword": {
"Type": "String",
"Description": "The WordPress database admin account password",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "41",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"InstanceType": {
"Default": "m1.small",
"Type": "String",
"ConstraintDescription": "must be a valid EC2 instance type.",
"Description": "WebServer EC2 instance type",
"AllowedValues": [
"m1.tiny",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge"
]
},
"Network": {
"Type": "String",
"CustomConstraint": "neutron.network"
}
}
}
"""
ENVIRONMENT = """
parameters:
InstanceType: m1.xsmall
db_password: verybadpass
KeyName: heat_key
"""
SNAPSHOT_CREATE = """
{
"status": "IN_PROGRESS",
"name": "None",
"data": "None",
"creation_time": "2016-02-19T07:25:23.494152",
"status_reason": "None",
"id": "8af90c07-b788-44ee-a8ab-5990197f5e32"
}
"""
class Environment(object):
def __init__(self, data):
self.data = data
class Template(object):
def __init__(self, data, validate):
self.data = data
self.validate = validate
class Snapshot(object):
def __init__(self, data):
self.data = data
def data(TEST):
TEST.stacks = utils.TestDataContainer()
TEST.stack_templates = utils.TestDataContainer()
TEST.stack_environments = utils.TestDataContainer()
TEST.stack_snapshot_create = utils.TestDataContainer()
TEST.stack_snapshot = utils.TestDataContainer()
TEST.resource_types = utils.TestDataContainer()
TEST.heat_resources = utils.TestDataContainer()
TEST.heat_services = utils.TestDataContainer()
# Services
service_1 = services.Service(services.ServiceManager(None), {
"status": "up",
"binary": "heat-engine",
"report_interval": 60,
"engine_id": "2f7b5a9b-c50b-4b01-8248-f89f5fb338d1",
"created_at": "2015-02-06T03:23:32.000000",
"hostname": "mrkanag",
"updated_at": "2015-02-20T09:49:52.000000",
"topic": "engine",
"host": "engine-1",
"deleted_at": None,
"id": "1efd7015-5016-4caa-b5c8-12438af7b100"
})
service_2 = services.Service(services.ServiceManager(None), {
"status": "up",
"binary": "heat-engine",
"report_interval": 60,
"engine_id": "2f7b5a9b-c50b-4b01-8248-f89f5fb338d2",
"created_at": "2015-02-06T03:23:32.000000",
"hostname": "mrkanag",
"updated_at": "2015-02-20T09:49:52.000000",
"topic": "engine",
"host": "engine-2",
"deleted_at": None,
"id": "1efd7015-5016-4caa-b5c8-12438af7b100"
})
TEST.heat_services.add(service_1)
TEST.heat_services.add(service_2)
# Data return by heatclient.
TEST.api_resource_types = utils.TestDataContainer()
for i in range(10):
stack_data = {
"description": "No description",
"links": [{
"href": "http://192.168.1.70:8004/v1/"
"051c727ee67040d6a7b7812708485a97/"
"stacks/stack-test{0}/"
"05b4f39f-ea96-4d91-910c-e758c078a089{0}".format(i),
"rel": "self"
}],
"parameters": {
'DBUsername': '******',
'InstanceType': 'm1.small',
'AWS::StackId': (
'arn:openstack:heat::2ce287:stacks/teststack/88553ec'),
'DBRootPassword': '******',
'AWS::StackName': "teststack{0}".format(i),
'DBPassword': '******',
'AWS::Region': 'ap-southeast-1',
'DBName': u'wordpress'
},
"stack_status_reason": "Stack successfully created",
"stack_name": "stack-test{0}".format(i),
"creation_time": "2013-04-22T00:11:39Z",
"updated_time": "2013-04-22T00:11:39Z",
"stack_status": "CREATE_COMPLETE",
"id": "05b4f39f-ea96-4d91-910c-e758c078a089{0}".format(i)
}
stack = stacks.Stack(stacks.StackManager(None), stack_data)
TEST.stacks.add(stack)
for i in range(10):
snapshot_data = {
"status": "COMPLETE",
"name": 'null',
"data": {
"files": {},
"status": "COMPLETE",
"name": "zhao3",
"tags": ["a", " 123", " b", " 456"],
"stack_user_project_id": "3cba4460875444049a2a7cc5420ccddb",
"environment": {
"encrypted_param_names": [],
"parameter_defaults": {},
"event_sinks": [],
"parameters": {},
"resource_registry": {
"resources": {}
}
},
"template": {
"heat_template_version": "2013-05-23",
"description":
"HOT template for Test.",
"resources": {
"private_subnet": {
"type": "OS::Neutron::Subnet",
"properties": {
"network_id": {"get_resource": "private_net"},
"cidr": "172.16.2.0/24",
"gateway_ip": "172.16.2.1"
}
},
"private_net": {
"type": "OS::Neutron::Net",
"properties": {"name": "private-net"}
}
}
},
"action": "SNAPSHOT",
"project_id": "1acd0026829f4d28bb2eff912d7aad0d",
"id": "70650725-bdbd-419f-b53f-5707767bfe0e",
"resources": {
"private_subnet": {
"status": "COMPLETE",
"name": "private_subnet",
"resource_data": {},
"resource_id": "9c7211b3-31c7-41f6-b92a-442ad3f71ef0",
"action": "SNAPSHOT",
"type": "OS::Neutron::Subnet",
"metadata": {}
},
"private_net": {
"status": "COMPLETE",
"name": "private_net",
"resource_data": {},
"resource_id": "ff4fd287-31b2-4d00-bc96-c409bc1db027",
"action": "SNAPSHOT",
"type": "OS::Neutron::Net",
"metadata": {}
}
}
},
"creation_time": "2016-02-21T04:02:54",
"status_reason": "Stack SNAPSHOT completed successfully",
"id": "01558a3b-ba05-4427-bbb4-1e4ab71cfcad"
}
TEST.stack_snapshot.add(snapshot_data)
TEST.stack_templates.add(Template(TEMPLATE, VALIDATE))
TEST.stack_environments.add(Environment(ENVIRONMENT))
TEST.stack_snapshot_create.add(Snapshot(SNAPSHOT_CREATE))
# Resource types list
r_type_1 = {
"resource_type": "AWS::CloudFormation::Stack",
"attributes": {},
"properties": {
"Parameters": {
"description":
"The set of parameters passed to this nested stack.",
"immutable": False,
"required": False,
"type": "map",
"update_allowed": True},
"TemplateURL": {
"description": "The URL of a template that specifies"
" the stack to be created as a resource.",
"immutable": False,
"required": True,
"type": "string",
"update_allowed": True},
"TimeoutInMinutes": {
"description": "The length of time, in minutes,"
" to wait for the nested stack creation.",
"immutable": False,
"required": False,
"type": "number",
"update_allowed": True}
}
}
r_type_2 = {
"resource_type": "OS::Heat::CloudConfig",
"attributes": {
"config": {
"description": "The config value of the software config."}
},
"properties": {
"cloud_config": {
"description": "Map representing the cloud-config data"
" structure which will be formatted as YAML.",
"immutable": False,
"required": False,
"type": "map",
"update_allowed": False}
}
}
r_types_list = [r_type_1, r_type_2]
for rt in r_types_list:
r_type = resource_types.ResourceType(
resource_types.ResourceTypeManager(None), rt['resource_type'])
TEST.resource_types.add(r_type)
TEST.api_resource_types.add(rt)
# Resources
resource_1 = resources.Resource(resources.ResourceManager(None), {
"logical_resource_id": "my_resource",
"physical_resource_id": "7b5e29b1-c94d-402d-b69c-df9ac6dfc0ce",
"resource_name": "my_resource",
"links": [
{
"href": "http://192.168.1.70:8004/v1/"
"051c727ee67040d6a7b7812708485a97/"
"stacks/%s/%s/resources/my_resource" %
(TEST.stacks.first().stack_name,
TEST.stacks.first().id),
"rel": "self"
},
{
"href": "http://192.168.1.70:8004/v1/"
"051c727ee67040d6a7b7812708485a97/"
"stacks/%s/%s" %
(TEST.stacks.first().stack_name,
TEST.stacks.first().id),
"rel": "stack"
}
],
"attributes": {
"metadata": {}
}
})
TEST.heat_resources.add(resource_1)
| apache-2.0 | 8,522,080,738,482,221,000 | 25.732993 | 79 | 0.535976 | false |
Jumpscale/jumpscale6_core | apps/portalbase/wiki/Help/.macros/wiki/menuadmin_jdoc/1_menuadmin.py | 2 | 1685 |
def main(j, args, params, tags, tasklet):
params.merge(args)
doc = params.doc
tags = params.tags
params.result = ""
# spaces = sorted(j.core.portal.active.getSpaces())
# spacestxt=""
# for item in spaces:
# if item[0] != "_" and item.strip() != "" and item.find("space_system")==-1 and item.find("test")==-1 and item.find("gridlogs")==-1:
# spacestxt += "%s:/%s\n" % (item, item.lower().strip("/"))
C = """
{{menudropdown: name:Doc
Edit:/system/edit?space=$$space&page=$$page
--------------
Logout:/system/login?user_logoff_=1
Access:/system/OverviewAccess?space=$$space
System:/system
--------------
Doc Core:/doc_jumpscale_core
Doc Devel:/doc_jumpscale_devel
Doc Grid:/doc_jumpscale_grid
Doc Howto:/doc_jumpscale_howto
Doc Portal:/doc_jumpscale_portal
"""
# C+=spacestxt
C+='}}'
    # entries that used to be part of the dropdown menu above:
#Reload:javascript:$.ajax({'url': '/system/ReloadSpace?name=$$space'}).done(function(){location.reload()});void(0);
#ShowLogs:/system/ShowSpaceAccessLog?space=$$space
#ResetLogs:/system/ResetAccessLog?space=$$space
#Spaces:/system/Spaces
#Pages:/system/Pages?space=$$space
#ReloadAll:javascript:(function loadAll() {$.ajax({'url': '/system/ReloadApplication'});(function checkSpaceIsUp(trials) {if (trials <= 0) return;setTimeout(function() {$.ajax({'url': '/system/'}).done(function(){location.reload();console.log('Reloaded');}).error(function(){checkSpaceIsUp(trials - 1)});}, 1000);})(10);})();void(0);
if j.core.portal.active.isAdminFromCTX(params.requestContext):
params.result = C
params.result = (params.result, doc)
return params
def match(j, args, params, tags, tasklet):
return True
| bsd-2-clause | 6,718,596,536,480,113,000 | 31.403846 | 333 | 0.652819 | false |
anthonypdawson/LazyLibrarian | lazylibrarian/providers.py | 1 | 4430 | import time, threading, urllib, urllib2, re
from xml.etree import ElementTree
import lazylibrarian
from lazylibrarian import logger, SimpleCache
def NewzNab(book=None):
HOST = lazylibrarian.NEWZNAB_HOST
results = []
logger.info('Searching for %s.' % book['searchterm'])
    if lazylibrarian.EBOOK_TYPE is None:
params = {
"t": "book",
"apikey": lazylibrarian.NEWZNAB_API,
#"cat": 7020,
"author": book['searchterm']
}
else:
params = {
"t": "search",
"apikey": lazylibrarian.NEWZNAB_API,
"cat": 7020,
"q": book['searchterm'],
"extended": 1,
}
if not str(HOST)[:4] == "http":
HOST = 'http://' + HOST
URL = HOST + '/api?' + urllib.urlencode(params)
    try:
request = urllib2.Request(URL)
request.add_header('User-Agent', 'lazylibrary/0.0 +https://github.com/LibrarianMike/LazyLibrarian')
opener = urllib2.build_opener(SimpleCache.CacheHandler(".urllib2cache"), SimpleCache.ThrottlingProcessor(5))
resp = opener.open(request)
try:
data = ElementTree.parse(resp)
except (urllib2.URLError, IOError, EOFError), e:
logger.warn('Error fetching data from %s: %s' % (lazylibrarian.NEWZNAB_HOST, e))
data = None
except Exception, e:
logger.error("Error 403 openning url")
data = None
if data:
        # log the queried URL to help debug provider API responses
logger.debug(u'Parsing results from <a href="%s">%s</a>' % (URL, lazylibrarian.NEWZNAB_HOST))
rootxml = data.getroot()
resultxml = rootxml.getiterator('item')
nzbcount = 0
for nzb in resultxml:
try:
nzbcount = nzbcount+1
results.append({
'bookid': book['bookid'],
'nzbprov': "NewzNab",
'nzbtitle': nzb[0].text,
'nzburl': nzb[2].text,
'nzbdate': nzb[4].text,
'nzbsize': nzb[7].attrib.get('length')
})
except IndexError:
logger.debug('No results')
if nzbcount:
logger.debug('Found %s nzb for: %s' % (nzbcount, book['searchterm']))
else:
logger.debug(u'Newznab returned 0 results for: ' + book['searchterm'] + '. Adding book to queue.')
return results
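# Illustrative call (added commentary, not part of the original file): both provider
# functions expect a dict with at least 'bookid' and 'searchterm' keys, e.g.
#
#     results = NewzNab({'bookid': '42', 'searchterm': 'Author Title'})
#
# The concrete values are hypothetical; host and API key come from the
# lazylibrarian module globals referenced above.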
def NZBMatrix(book=None):
results = []
    if lazylibrarian.EBOOK_TYPE is None or lazylibrarian.EBOOK_TYPE == "":
params = {
"page": "download",
"username": lazylibrarian.NZBMATRIX_USER,
"apikey": lazylibrarian.NZBMATRIX_API,
"subcat": 36,
"age": lazylibrarian.USENET_RETENTION,
"term": book['searchterm']
}
else:
params = {
"page": "download",
"username": lazylibrarian.NZBMATRIX_USER,
"apikey": lazylibrarian.NZBMATRIX_API,
"subcat": 36,
"age": lazylibrarian.USENET_RETENTION,
"term": book['searchterm']
}
logger.debug('Searching for: ' + book['searchterm'])
URL = "http://rss.nzbmatrix.com/rss.php?" + urllib.urlencode(params)
    # log the queried URL to help debug provider API responses
logger.debug(u'Parsing results from <a href="%s">NZBMatrix</a>' % (URL))
try:
data = ElementTree.parse(urllib2.urlopen(URL, timeout=30))
except (urllib2.URLError, IOError, EOFError), e:
logger.warn('Error fetching data from NZBMatrix: %s' % e)
data = None
if data:
rootxml = data.getroot()
resultxml = rootxml.getiterator('item')
nzbcount = 0
for nzb in resultxml:
try:
results.append({
'bookid': book['bookid'],
'nzbprov': "NZBMatrix",
'nzbtitle': nzb[0].text,
'nzburl': nzb[2].text,
'nzbsize': nzb[7].attrib.get('length')
})
nzbcount = nzbcount+1
except IndexError:
logger.debug('No results')
if nzbcount:
logger.debug('Found %s nzb for: %s' % (nzbcount, book['searchterm']))
else:
logger.debug('NZBMatrix returned 0 results for: ' + book['searchterm'] + '. Adding book to queue.')
return results
| gpl-3.0 | 2,962,947,908,830,664,000 | 32.308271 | 116 | 0.531603 | false |
geodrinx/gearthview | ext-libs/twisted/internet/_utilspy3.py | 4 | 1916 | # -*- test-case-name: twisted.internet.test.test_utilspy3 -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utility methods, ported to Python 3.
"""
from __future__ import division, absolute_import
import sys, warnings
from functools import wraps
from twisted.python.compat import reraise
from twisted.internet import defer
def _resetWarningFilters(passthrough, addedFilters):
for f in addedFilters:
try:
warnings.filters.remove(f)
except ValueError:
pass
return passthrough
def runWithWarningsSuppressed(suppressedWarnings, f, *a, **kw):
"""Run the function C{f}, but with some warnings suppressed.
@param suppressedWarnings: A list of arguments to pass to filterwarnings.
Must be a sequence of 2-tuples (args, kwargs).
@param f: A callable, followed by its arguments and keyword arguments
"""
for args, kwargs in suppressedWarnings:
warnings.filterwarnings(*args, **kwargs)
addedFilters = warnings.filters[:len(suppressedWarnings)]
try:
result = f(*a, **kw)
except:
exc_info = sys.exc_info()
_resetWarningFilters(None, addedFilters)
reraise(exc_info[1], exc_info[2])
else:
if isinstance(result, defer.Deferred):
result.addBoth(_resetWarningFilters, addedFilters)
else:
_resetWarningFilters(None, addedFilters)
return result
def suppressWarnings(f, *suppressedWarnings):
"""
Wrap C{f} in a callable which suppresses the indicated warnings before
invoking C{f} and unsuppresses them afterwards. If f returns a Deferred,
warnings will remain suppressed until the Deferred fires.
"""
@wraps(f)
def warningSuppressingWrapper(*a, **kw):
return runWithWarningsSuppressed(suppressedWarnings, f, *a, **kw)
return warningSuppressingWrapper
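# Illustrative usage sketch (added commentary, not part of the original file):
#
#     import warnings
#
#     def noisy():
#         warnings.warn("old API", DeprecationWarning)
#         return 42
#
#     quiet = suppressWarnings(noisy, (('ignore',), {'category': DeprecationWarning}))
#     quiet()  # returns 42 without emitting the DeprecationWarning
#
# Each suppressedWarnings entry is an (args, kwargs) pair handed straight to
# warnings.filterwarnings, as documented in runWithWarningsSuppressed above.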
| gpl-3.0 | -5,045,512,847,922,240,000 | 31.474576 | 77 | 0.680585 | false |
maestro-hybrid-cloud/horizon | openstack_dashboard/dashboards/identity/domains/views.py | 55 | 3604 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.dashboards.identity.domains import constants
from openstack_dashboard.dashboards.identity.domains \
import tables as project_tables
from openstack_dashboard.dashboards.identity.domains \
import workflows as project_workflows
class IndexView(tables.DataTableView):
table_class = project_tables.DomainsTable
template_name = constants.DOMAINS_INDEX_VIEW_TEMPLATE
page_title = _("Domains")
def get_data(self):
domains = []
domain_context = self.request.session.get('domain_context', None)
if policy.check((("identity", "identity:list_domains"),),
self.request):
try:
if domain_context:
domain = api.keystone.domain_get(self.request,
domain_context)
domains.append(domain)
else:
domains = api.keystone.domain_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve domain list.'))
elif policy.check((("identity", "identity:get_domain"),),
self.request):
try:
domain = api.keystone.domain_get(self.request,
self.request.user.domain_id)
domains.append(domain)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve domain information.'))
else:
msg = _("Insufficient privilege level to view domain information.")
messages.info(self.request, msg)
return domains
class CreateDomainView(workflows.WorkflowView):
workflow_class = project_workflows.CreateDomain
class UpdateDomainView(workflows.WorkflowView):
workflow_class = project_workflows.UpdateDomain
def get_initial(self):
initial = super(UpdateDomainView, self).get_initial()
domain_id = self.kwargs['domain_id']
initial['domain_id'] = domain_id
try:
# get initial domain info
domain_info = api.keystone.domain_get(self.request,
domain_id)
for field in constants.DOMAIN_INFO_FIELDS:
initial[field] = getattr(domain_info, field, None)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve domain details.'),
redirect=reverse(constants.DOMAINS_INDEX_URL))
return initial
| apache-2.0 | -4,943,588,318,440,621,000 | 38.604396 | 79 | 0.61737 | false |
abhattad4/Digi-Menu | digimenu2/tests/foreign_object/tests.py | 113 | 18071 | import datetime
from operator import attrgetter
from django import forms
from django.core.exceptions import FieldError
from django.test import TestCase, skipUnlessDBFeature
from django.utils import translation
from .models import (
Article, ArticleIdea, ArticleTag, ArticleTranslation, Country, Friendship,
Group, Membership, NewsArticle, Person,
)
# Note that these tests are testing internal implementation details.
# ForeignObject is not part of public API.
class MultiColumnFKTests(TestCase):
def setUp(self):
# Creating countries
self.usa = Country.objects.create(name="United States of America")
self.soviet_union = Country.objects.create(name="Soviet Union")
Person()
# Creating People
self.bob = Person()
self.bob.name = 'Bob'
self.bob.person_country = self.usa
self.bob.save()
self.jim = Person.objects.create(name='Jim', person_country=self.usa)
self.george = Person.objects.create(name='George', person_country=self.usa)
self.jane = Person.objects.create(name='Jane', person_country=self.soviet_union)
self.mark = Person.objects.create(name='Mark', person_country=self.soviet_union)
self.sam = Person.objects.create(name='Sam', person_country=self.soviet_union)
# Creating Groups
self.kgb = Group.objects.create(name='KGB', group_country=self.soviet_union)
self.cia = Group.objects.create(name='CIA', group_country=self.usa)
self.republican = Group.objects.create(name='Republican', group_country=self.usa)
self.democrat = Group.objects.create(name='Democrat', group_country=self.usa)
def test_get_succeeds_on_multicolumn_match(self):
# Membership objects have access to their related Person if both
# country_ids match between them
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
person = membership.person
self.assertEqual((person.id, person.name), (self.bob.id, "Bob"))
def test_get_fails_on_multicolumn_mismatch(self):
        # Membership objects raise a DoesNotExist error when there is no
# Person with the same id and country_id
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id)
self.assertRaises(Person.DoesNotExist, getattr, membership, 'person')
def test_reverse_query_returns_correct_result(self):
        # Creating a valid membership because it has the same country as the person
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
        # Creating an invalid membership because it has a different country than the person
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.bob.id,
group_id=self.republican.id)
self.assertQuerysetEqual(
self.bob.membership_set.all(), [
self.cia.id
],
attrgetter("group_id")
)
def test_query_filters_correctly(self):
        # Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(membership_country_id=self.soviet_union.id,
person_id=self.george.id, group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__name__contains='o'), [
self.bob.id
],
attrgetter("person_id")
)
def test_reverse_query_filters_correctly(self):
timemark = datetime.datetime.utcnow()
timedelta = datetime.timedelta(days=1)
        # Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id, date_joined=timemark - timedelta)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gte=timemark), [
'Jim'
],
attrgetter('name')
)
def test_forward_in_lookup_filters_correctly(self):
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=[self.george, self.jim]), [
self.jim.id,
],
attrgetter('person_id')
)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [
self.jim.id,
],
attrgetter('person_id')
)
def test_double_nested_query(self):
m1 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
m2 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
Friendship.objects.create(from_friend_country_id=self.usa.id, from_friend_id=self.bob.id,
to_friend_country_id=self.usa.id, to_friend_id=self.jim.id)
self.assertQuerysetEqual(Membership.objects.filter(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(
to_friend__in=Person.objects.all()))),
[m1], lambda x: x)
self.assertQuerysetEqual(Membership.objects.exclude(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(
to_friend__in=Person.objects.all()))),
[m2], lambda x: x)
def test_select_related_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(1):
people = [m.person for m in Membership.objects.select_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.all().order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
people = [
m.person for m in Membership.objects.prefetch_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
membership_sets = [
list(p.membership_set.all())
for p in Person.objects.prefetch_related('membership_set').order_by('pk')]
normal_membership_sets = [list(p.membership_set.all())
for p in Person.objects.order_by('pk')]
self.assertEqual(membership_sets, normal_membership_sets)
def test_m2m_through_forward_returns_valid_members(self):
# We start out by making sure that the Group 'CIA' has no members.
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.cia)
# Let's check to make sure that it worked. Bob and Jim should be members of the CIA.
self.assertQuerysetEqual(
self.cia.members.all(), [
'Bob',
'Jim'
], attrgetter("name")
)
def test_m2m_through_reverse_returns_valid_members(self):
# We start out by making sure that Bob is in no groups.
self.assertQuerysetEqual(
self.bob.groups.all(),
[]
)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.bob,
group=self.republican)
# Bob should be in the CIA and a Republican
self.assertQuerysetEqual(
self.bob.groups.all(), [
'CIA',
'Republican'
], attrgetter("name")
)
def test_m2m_through_forward_ignores_invalid_members(self):
# We start out by making sure that the Group 'CIA' has no members.
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
# Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
# There should still be no members in CIA
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
def test_m2m_through_reverse_ignores_invalid_members(self):
# We start out by making sure that Jane has no groups.
self.assertQuerysetEqual(
self.jane.groups.all(),
[]
)
# Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
# Jane should still not be in any groups
self.assertQuerysetEqual(
self.jane.groups.all(),
[]
)
def test_m2m_through_on_self_works(self):
self.assertQuerysetEqual(
self.jane.friends.all(),
[]
)
Friendship.objects.create(
from_friend_country=self.jane.person_country, from_friend=self.jane,
to_friend_country=self.george.person_country, to_friend=self.george)
self.assertQuerysetEqual(
self.jane.friends.all(),
['George'], attrgetter("name")
)
def test_m2m_through_on_self_ignores_mismatch_columns(self):
self.assertQuerysetEqual(self.jane.friends.all(), [])
        # Note that we use ids instead of instances. This is because assigning an instance
        # to a ForeignObject property would set all of the related fields from that instance.
Friendship.objects.create(
from_friend_id=self.jane.id, to_friend_id=self.george.id,
to_friend_country_id=self.jane.person_country_id,
from_friend_country_id=self.george.person_country_id)
self.assertQuerysetEqual(self.jane.friends.all(), [])
    def test_prefetch_related_m2m_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
members_lists = [list(g.members.all())
for g in Group.objects.prefetch_related('members')]
normal_members_lists = [list(g.members.all()) for g in Group.objects.all()]
self.assertEqual(members_lists, normal_members_lists)
def test_prefetch_related_m2m_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
groups_lists = [list(p.groups.all()) for p in Person.objects.prefetch_related('groups')]
normal_groups_lists = [list(p.groups.all()) for p in Person.objects.all()]
self.assertEqual(groups_lists, normal_groups_lists)
@translation.override('fi')
def test_translations(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
at1_fi = ArticleTranslation(article=a1, lang='fi', title='Otsikko', body='Diipadaapa')
at1_fi.save()
at2_en = ArticleTranslation(article=a1, lang='en', title='Title', body='Lalalalala')
at2_en.save()
with self.assertNumQueries(1):
fetched = Article.objects.select_related('active_translation').get(
active_translation__title='Otsikko')
self.assertEqual(fetched.active_translation.title, 'Otsikko')
a2 = Article.objects.create(pub_date=datetime.date.today())
at2_fi = ArticleTranslation(article=a2, lang='fi', title='Atsikko', body='Diipadaapa',
abstract='dipad')
at2_fi.save()
a3 = Article.objects.create(pub_date=datetime.date.today())
at3_en = ArticleTranslation(article=a3, lang='en', title='A title', body='lalalalala',
abstract='lala')
at3_en.save()
# Test model initialization with active_translation field.
a3 = Article(id=a3.id, pub_date=a3.pub_date, active_translation=at3_en)
a3.save()
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None)),
[a1, a3])
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None,
active_translation__pk__isnull=False)),
[a1])
with translation.override('en'):
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None)),
[a1, a2])
def test_foreign_key_raises_informative_does_not_exist(self):
referrer = ArticleTranslation()
with self.assertRaisesMessage(Article.DoesNotExist, 'ArticleTranslation has no article'):
referrer.article
def test_foreign_key_related_query_name(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
ArticleTag.objects.create(article=a1, name="foo")
self.assertEqual(Article.objects.filter(tag__name="foo").count(), 1)
self.assertEqual(Article.objects.filter(tag__name="bar").count(), 0)
with self.assertRaises(FieldError):
Article.objects.filter(tags__name="foo")
def test_many_to_many_related_query_name(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
i1 = ArticleIdea.objects.create(name="idea1")
a1.ideas.add(i1)
self.assertEqual(Article.objects.filter(idea_things__name="idea1").count(), 1)
self.assertEqual(Article.objects.filter(idea_things__name="idea2").count(), 0)
with self.assertRaises(FieldError):
Article.objects.filter(ideas__name="idea1")
@translation.override('fi')
def test_inheritance(self):
na = NewsArticle.objects.create(pub_date=datetime.date.today())
ArticleTranslation.objects.create(
article=na, lang="fi", title="foo", body="bar")
self.assertQuerysetEqual(
NewsArticle.objects.select_related('active_translation'),
[na], lambda x: x
)
with self.assertNumQueries(1):
self.assertEqual(
NewsArticle.objects.select_related(
'active_translation')[0].active_translation.title,
"foo")
@skipUnlessDBFeature('has_bulk_insert')
def test_batch_create_foreign_object(self):
""" See: https://code.djangoproject.com/ticket/21566 """
objs = [Person(name="abcd_%s" % i, person_country=self.usa) for i in range(0, 5)]
Person.objects.bulk_create(objs, 10)
class FormsTests(TestCase):
# ForeignObjects should not have any form fields, currently the user needs
# to manually deal with the foreignobject relation.
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = '__all__'
def test_foreign_object_form(self):
# A very crude test checking that the non-concrete fields do not get form fields.
form = FormsTests.ArticleForm()
self.assertIn('id_pub_date', form.as_table())
self.assertNotIn('active_translation', form.as_table())
form = FormsTests.ArticleForm(data={'pub_date': str(datetime.date.today())})
self.assertTrue(form.is_valid())
a = form.save()
self.assertEqual(a.pub_date, datetime.date.today())
form = FormsTests.ArticleForm(instance=a, data={'pub_date': '2013-01-01'})
a2 = form.save()
self.assertEqual(a.pk, a2.pk)
self.assertEqual(a2.pub_date, datetime.date(2013, 1, 1))
| bsd-3-clause | 740,984,528,308,543,400 | 42.649758 | 100 | 0.629462 | false |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/v8/tools/testrunner/objects/testcase.py | 5 | 4522 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import output
class TestCase(object):
def __init__(self, suite, path, variant=None, flags=None,
override_shell=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.variant = variant # name of the used testing variant
self.override_shell = override_shell
self.outcomes = frozenset([])
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
def CopyAddingFlags(self, variant, flags):
copy = TestCase(self.suite, self.path, variant, self.flags + flags,
self.override_shell)
copy.outcomes = self.outcomes
return copy
def PackTask(self):
"""
Extracts those parts of this object that are required to run the test
and returns them as a JSON serializable object.
"""
assert self.id is not None
return [self.suitename(), self.path, self.variant, self.flags,
self.override_shell, list(self.outcomes or []),
self.id]
@staticmethod
def UnpackTask(task):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
test.outcomes = frozenset(task[5])
test.id = task[6]
test.run = 1
return test
def SetSuiteObject(self, suites):
self.suite = suites[self.suite]
def PackResult(self):
"""Serializes the output of the TestCase after it has run."""
self.suite.StripOutputForTransmit(self)
return [self.id, self.output.Pack(), self.duration]
def MergeResult(self, result):
"""Applies the contents of a Result to this object."""
assert result[0] == self.id
self.output = output.Output.Unpack(result[1])
self.duration = result[2]
def suitename(self):
return self.suite.name
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
def shell(self):
if self.override_shell:
return self.override_shell
return self.suite.shell()
def __getstate__(self):
"""Representation to pickle test cases.
The original suite won't be sent beyond process boundaries. Instead
send the name only and retrieve a process-local suite later.
"""
return dict(self.__dict__, suite=self.suite.name)
def __cmp__(self, other):
# Make sure that test cases are sorted correctly if sorted without
# key function. But using a key function is preferred for speed.
return cmp(
(self.suite.name, self.path, self.flags),
(other.suite.name, other.path, other.flags),
)
def __str__(self):
return "[%s/%s %s]" % (self.suite.name, self.path, self.flags)
| gpl-3.0 | 6,685,673,053,489,947,000 | 39.017699 | 76 | 0.693498 | false |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/_pytest/helpconfig.py | 180 | 5120 | """ version info, help messages, tracing configuration. """
import py
import pytest
import os, sys
def pytest_addoption(parser):
group = parser.getgroup('debugconfig')
group.addoption('--version', action="store_true",
help="display pytest lib version and import information.")
group._addoption("-h", "--help", action="store_true", dest="help",
help="show help message and configuration info")
group._addoption('-p', action="append", dest="plugins", default = [],
metavar="name",
help="early-load given plugin (multi-allowed). "
"To avoid loading of plugins, use the `no:` prefix, e.g. "
"`no:doctest`.")
group.addoption('--traceconfig', '--trace-config',
action="store_true", default=False,
help="trace considerations of conftest.py files."),
group.addoption('--debug',
action="store_true", dest="debug", default=False,
help="store internal tracing debug information in 'pytestdebug.log'.")
@pytest.hookimpl(hookwrapper=True)
def pytest_cmdline_parse():
outcome = yield
config = outcome.get_result()
if config.option.debug:
path = os.path.abspath("pytestdebug.log")
debugfile = open(path, 'w')
debugfile.write("versions pytest-%s, py-%s, "
"python-%s\ncwd=%s\nargs=%s\n\n" %(
pytest.__version__, py.__version__,
".".join(map(str, sys.version_info)),
os.getcwd(), config._origargs))
config.trace.root.setwriter(debugfile.write)
undo_tracing = config.pluginmanager.enable_tracing()
sys.stderr.write("writing pytestdebug information to %s\n" % path)
def unset_tracing():
debugfile.close()
sys.stderr.write("wrote pytestdebug information to %s\n" %
debugfile.name)
config.trace.root.setwriter(None)
undo_tracing()
config.add_cleanup(unset_tracing)
def pytest_cmdline_main(config):
if config.option.version:
p = py.path.local(pytest.__file__)
sys.stderr.write("This is pytest version %s, imported from %s\n" %
(pytest.__version__, p))
plugininfo = getpluginversioninfo(config)
if plugininfo:
for line in plugininfo:
sys.stderr.write(line + "\n")
return 0
elif config.option.help:
config._do_configure()
showhelp(config)
config._ensure_unconfigure()
return 0
def showhelp(config):
reporter = config.pluginmanager.get_plugin('terminalreporter')
tw = reporter._tw
tw.write(config._parser.optparser.format_help())
tw.line()
tw.line()
#tw.sep( "=", "config file settings")
tw.line("[pytest] ini-options in the next "
"pytest.ini|tox.ini|setup.cfg file:")
tw.line()
for name in config._parser._ininames:
help, type, default = config._parser._inidict[name]
if type is None:
type = "string"
spec = "%s (%s)" % (name, type)
line = " %-24s %s" %(spec, help)
tw.line(line[:tw.fullwidth])
tw.line()
tw.line("environment variables:")
vars = [
("PYTEST_ADDOPTS", "extra command line options"),
("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals")
]
for name, help in vars:
tw.line(" %-24s %s" % (name, help))
tw.line()
tw.line()
tw.line("to see available markers type: py.test --markers")
tw.line("to see available fixtures type: py.test --fixtures")
tw.line("(shown according to specified file_or_dir or current dir "
"if not specified)")
for warningreport in reporter.stats.get('warnings', []):
tw.line("warning : " + warningreport.message, red=True)
return
conftest_options = [
('pytest_plugins', 'list of plugin names to load'),
]
def getpluginversioninfo(config):
lines = []
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
lines.append("setuptools registered plugins:")
for plugin, dist in plugininfo:
loc = getattr(plugin, '__file__', repr(plugin))
content = "%s-%s at %s" % (dist.project_name, dist.version, loc)
lines.append(" " + content)
return lines
def pytest_report_header(config):
lines = []
if config.option.debug or config.option.traceconfig:
lines.append("using: pytest-%s pylib-%s" %
(pytest.__version__,py.__version__))
verinfo = getpluginversioninfo(config)
if verinfo:
lines.extend(verinfo)
if config.option.traceconfig:
lines.append("active plugins:")
items = config.pluginmanager.list_name_plugin()
for name, plugin in items:
if hasattr(plugin, '__file__'):
r = plugin.__file__
else:
r = repr(plugin)
lines.append(" %-20s: %s" %(name, r))
return lines
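# Illustrative invocations (added commentary, not part of the original file): the
# options registered above are exercised from the command line, e.g.
#
#     py.test --traceconfig    # show conftest considerations and active plugins
#     py.test --debug          # write internal tracing to pytestdebug.log
#     py.test -p no:doctest    # block the doctest plugin from loading
#
# These examples only restate the option help strings defined in pytest_addoption().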
| mit | 8,628,898,305,592,721,000 | 35.834532 | 85 | 0.588281 | false |
anu7495/airmozilla | airmozilla/manage/tests/test_helpers.py | 5 | 2690 | import datetime
import time
import jinja2
from nose.tools import ok_, eq_
from django.test import TestCase
from airmozilla.main.models import Event
from airmozilla.manage.helpers import (
almost_equal,
event_status_to_css_label,
format_message,
formatduration,
)
class TestAlmostEqual(TestCase):
def test_almost_equal(self):
date1 = datetime.datetime.now()
time.sleep(0.001)
date2 = datetime.datetime.now()
assert date1 != date2
ok_(almost_equal(date1, date2))
ok_(almost_equal(date2, date1))
def test_almost_equal_different_days(self):
date1 = date2 = datetime.datetime.now()
date2 += datetime.timedelta(days=1)
ok_(not almost_equal(date1, date2))
ok_(not almost_equal(date2, date1))
def test_not_equal_but_close(self):
date1 = date2 = datetime.datetime.now()
date2 += datetime.timedelta(seconds=1)
ok_(not almost_equal(date1, date2))
ok_(not almost_equal(date2, date1))
class MiscTests(TestCase):
def test_event_status_to_css_label(self):
label = event_status_to_css_label(Event.STATUS_REMOVED)
eq_(label, 'label-danger')
label = event_status_to_css_label(Event.STATUS_INITIATED)
eq_(label, 'label-default')
label = event_status_to_css_label(Event.STATUS_SCHEDULED)
eq_(label, 'label-success')
label = event_status_to_css_label(Event.STATUS_PENDING)
eq_(label, 'label-primary')
def test_format_message(self):
result = format_message('bla')
eq_(result, 'bla')
ok_(not isinstance(result, jinja2.Markup))
# or it's an object
class M(object):
message = 'ble'
m = M()
eq_(format_message(m), 'ble')
        # or a message containing a markdown-style relative link
result = format_message("Go [to](/page.html)")
eq_(
result,
'Go <a href="/page.html" class="message-inline">to</a>'
)
ok_(isinstance(result, jinja2.Markup))
        # or if it contains a balanced <code> tag
result = format_message("<code>Code</code>")
eq_(
result,
'<code>Code</code>'
)
ok_(isinstance(result, jinja2.Markup))
def test_formatduration(self):
output = formatduration(10)
eq_(output, '10s')
output = formatduration(60)
eq_(output, u'1m\xa00s')
output = formatduration(70)
eq_(output, u'1m\xa010s')
output = formatduration(60 * 60)
eq_(output, u'1h\xa00m\xa00s')
output = formatduration(60 * 60 + 61)
eq_(output, u'1h\xa01m\xa01s')
| bsd-3-clause | 3,100,442,478,348,697,000 | 28.888889 | 67 | 0.597398 | false |
DigiThinkIT/stem | stem/response/authchallenge.py | 5 | 1910 | # Copyright 2012-2014, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import binascii
import stem.response
import stem.socket
import stem.util.str_tools
import stem.util.tor_tools
class AuthChallengeResponse(stem.response.ControlMessage):
"""
AUTHCHALLENGE query response.
:var str server_hash: server hash provided by tor
:var str server_nonce: server nonce provided by tor
"""
def _parse_message(self):
# Example:
# 250 AUTHCHALLENGE SERVERHASH=680A73C9836C4F557314EA1C4EDE54C285DB9DC89C83627401AEF9D7D27A95D5 SERVERNONCE=F8EA4B1F2C8B40EF1AF68860171605B910E3BBCABADF6FC3DB1FA064F4690E85
self.server_hash = None
self.server_nonce = None
if not self.is_ok():
raise stem.ProtocolError("AUTHCHALLENGE response didn't have an OK status:\n%s" % self)
elif len(self) > 1:
raise stem.ProtocolError('Received multiline AUTHCHALLENGE response:\n%s' % self)
line = self[0]
# sanity check that we're a AUTHCHALLENGE response
if not line.pop() == 'AUTHCHALLENGE':
raise stem.ProtocolError('Message is not an AUTHCHALLENGE response (%s)' % self)
if line.is_next_mapping('SERVERHASH'):
value = line.pop_mapping()[1]
if not stem.util.tor_tools.is_hex_digits(value, 64):
raise stem.ProtocolError('SERVERHASH has an invalid value: %s' % value)
self.server_hash = binascii.a2b_hex(stem.util.str_tools._to_bytes(value))
else:
raise stem.ProtocolError('Missing SERVERHASH mapping: %s' % line)
if line.is_next_mapping('SERVERNONCE'):
value = line.pop_mapping()[1]
if not stem.util.tor_tools.is_hex_digits(value, 64):
raise stem.ProtocolError('SERVERNONCE has an invalid value: %s' % value)
self.server_nonce = binascii.a2b_hex(stem.util.str_tools._to_bytes(value))
else:
raise stem.ProtocolError('Missing SERVERNONCE mapping: %s' % line)
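# Illustrative result (added commentary, not part of the original file): for the
# example reply shown in _parse_message() above, the parsed attributes hold the
# raw bytes of the two hex values, e.g.
#
#     response.server_hash  == binascii.a2b_hex(b'680A73C9836C4F55...')
#     response.server_nonce == binascii.a2b_hex(b'F8EA4B1F2C8B40EF...')
#
# (hex strings truncated here for brevity).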
| lgpl-3.0 | 5,278,053,603,694,529,000 | 33.107143 | 178 | 0.71466 | false |
w1ll1am23/home-assistant | homeassistant/components/vesync/switch.py | 5 | 3299 | """Support for VeSync switches."""
import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .common import VeSyncDevice
from .const import DOMAIN, VS_DISCOVERY, VS_DISPATCHERS, VS_SWITCHES
_LOGGER = logging.getLogger(__name__)
DEV_TYPE_TO_HA = {
"wifi-switch-1.3": "outlet",
"ESW03-USA": "outlet",
"ESW01-EU": "outlet",
"ESW15-USA": "outlet",
"ESWL01": "switch",
"ESWL03": "switch",
"ESO15-TB": "outlet",
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up switches."""
async def async_discover(devices):
"""Add new devices to platform."""
_async_setup_entities(devices, async_add_entities)
disp = async_dispatcher_connect(
hass, VS_DISCOVERY.format(VS_SWITCHES), async_discover
)
hass.data[DOMAIN][VS_DISPATCHERS].append(disp)
_async_setup_entities(hass.data[DOMAIN][VS_SWITCHES], async_add_entities)
return True
@callback
def _async_setup_entities(devices, async_add_entities):
"""Check if device is online and add entity."""
dev_list = []
for dev in devices:
if DEV_TYPE_TO_HA.get(dev.device_type) == "outlet":
dev_list.append(VeSyncSwitchHA(dev))
elif DEV_TYPE_TO_HA.get(dev.device_type) == "switch":
dev_list.append(VeSyncLightSwitch(dev))
else:
_LOGGER.warning(
"%s - Unknown device type - %s", dev.device_name, dev.device_type
)
continue
async_add_entities(dev_list, update_before_add=True)
class VeSyncBaseSwitch(VeSyncDevice, SwitchEntity):
"""Base class for VeSync switch Device Representations."""
def turn_on(self, **kwargs):
"""Turn the device on."""
self.device.turn_on()
class VeSyncSwitchHA(VeSyncBaseSwitch, SwitchEntity):
"""Representation of a VeSync switch."""
def __init__(self, plug):
"""Initialize the VeSync switch device."""
super().__init__(plug)
self.smartplug = plug
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
if not hasattr(self.smartplug, "weekly_energy_total"):
return {}
return {
"voltage": self.smartplug.voltage,
"weekly_energy_total": self.smartplug.weekly_energy_total,
"monthly_energy_total": self.smartplug.monthly_energy_total,
"yearly_energy_total": self.smartplug.yearly_energy_total,
}
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self.smartplug.power
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
return self.smartplug.energy_today
def update(self):
"""Update outlet details and energy usage."""
self.smartplug.update()
self.smartplug.update_energy()
class VeSyncLightSwitch(VeSyncBaseSwitch, SwitchEntity):
"""Handle representation of VeSync Light Switch."""
def __init__(self, switch):
"""Initialize Light Switch device class."""
super().__init__(switch)
self.switch = switch
| apache-2.0 | -5,434,911,877,750,015,000 | 29.546296 | 81 | 0.639285 | false |
pgmillon/ansible | lib/ansible/modules/crypto/acme/acme_inspect.py | 20 | 12317 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018 Felix Fontein (@felixfontein)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: acme_inspect
author: "Felix Fontein (@felixfontein)"
version_added: "2.8"
short_description: Send direct requests to an ACME server
description:
- "Allows to send direct requests to an ACME server with the
L(ACME protocol,https://tools.ietf.org/html/rfc8555),
which is supported by CAs such as L(Let's Encrypt,https://letsencrypt.org/)."
- "This module can be used to debug failed certificate request attempts,
for example when M(acme_certificate) fails or encounters a problem which
you wish to investigate."
- "The module can also be used to directly access features of an ACME servers
which are not yet supported by the Ansible ACME modules."
notes:
- "The I(account_uri) option must be specified for properly authenticated
ACME v2 requests (except a C(new-account) request)."
- "Using the C(ansible) tool, M(acme_inspect) can be used to directly execute
ACME requests without the need of writing a playbook. For example, the
following command retrieves the ACME account with ID 1 from Let's Encrypt
(assuming C(/path/to/key) is the correct private account key):
C(ansible localhost -m acme_inspect -a \"account_key_src=/path/to/key
acme_directory=https://acme-v02.api.letsencrypt.org/directory acme_version=2
account_uri=https://acme-v02.api.letsencrypt.org/acme/acct/1 method=get
url=https://acme-v02.api.letsencrypt.org/acme/acct/1\")"
seealso:
- name: Automatic Certificate Management Environment (ACME)
description: The specification of the ACME protocol (RFC 8555).
link: https://tools.ietf.org/html/rfc8555
- name: ACME TLS ALPN Challenge Extension
description: The current draft specification of the C(tls-alpn-01) challenge.
link: https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05
extends_documentation_fragment:
- acme
options:
url:
description:
- "The URL to send the request to."
- "Must be specified if I(method) is not C(directory-only)."
type: str
method:
description:
- "The method to use to access the given URL on the ACME server."
- "The value C(post) executes an authenticated POST request. The content
must be specified in the I(content) option."
- "The value C(get) executes an authenticated POST-as-GET request for ACME v2,
and a regular GET request for ACME v1."
- "The value C(directory-only) only retrieves the directory, without doing
a request."
type: str
default: get
choices:
- get
- post
- directory-only
content:
description:
- "An encoded JSON object which will be sent as the content if I(method)
is C(post)."
- "Required when I(method) is C(post), and not allowed otherwise."
type: str
fail_on_acme_error:
description:
- "If I(method) is C(post) or C(get), make the module fail in case an ACME
error is returned."
type: bool
default: yes
'''
EXAMPLES = r'''
- name: Get directory
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
method: directory-only
register: directory
- name: Create an account
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
url: "{{ directory.newAccount}}"
method: post
content: '{"termsOfServiceAgreed":true}'
register: account_creation
# account_creation.headers.location contains the account URI
# if creation was successful
- name: Get account information
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ account_creation.headers.location }}"
method: get
- name: Update account contacts
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ account_creation.headers.location }}"
method: post
content: '{{ account_info | to_json }}'
vars:
account_info:
# For valid values, see
# https://tools.ietf.org/html/rfc8555#section-7.3
contact:
- mailto:[email protected]
- name: Create certificate order
acme_certificate:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
csr: /etc/pki/cert/csr/sample.com.csr
fullchain_dest: /etc/httpd/ssl/sample.com-fullchain.crt
challenge: http-01
register: certificate_request
# Assume something went wrong. certificate_request.order_uri contains
# the order URI.
- name: Get order information
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ certificate_request.order_uri }}"
method: get
register: order
- name: Get first authz for order
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ order.output_json.authorizations[0] }}"
method: get
register: authz
- name: Get HTTP-01 challenge for authz
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ authz.output_json.challenges | selectattr('type', 'equalto', 'http-01') }}"
method: get
register: http01challenge
- name: Activate HTTP-01 challenge manually
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ http01challenge.url }}"
method: post
content: '{}'
'''
RETURN = '''
directory:
description: The ACME directory's content
returned: always
type: dict
sample: |
{
"a85k3x9f91A4": "https://community.letsencrypt.org/t/adding-random-entries-to-the-directory/33417",
"keyChange": "https://acme-v02.api.letsencrypt.org/acme/key-change",
"meta": {
"caaIdentities": [
"letsencrypt.org"
],
"termsOfService": "https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf",
"website": "https://letsencrypt.org"
},
"newAccount": "https://acme-v02.api.letsencrypt.org/acme/new-acct",
"newNonce": "https://acme-v02.api.letsencrypt.org/acme/new-nonce",
"newOrder": "https://acme-v02.api.letsencrypt.org/acme/new-order",
"revokeCert": "https://acme-v02.api.letsencrypt.org/acme/revoke-cert"
}
headers:
description: The request's HTTP headers (with lowercase keys)
returned: always
type: dict
sample: |
{
"boulder-requester": "12345",
"cache-control": "max-age=0, no-cache, no-store",
"connection": "close",
"content-length": "904",
"content-type": "application/json",
"cookies": {},
"cookies_string": "",
"date": "Wed, 07 Nov 2018 12:34:56 GMT",
"expires": "Wed, 07 Nov 2018 12:44:56 GMT",
"link": "<https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf>;rel=\"terms-of-service\"",
"msg": "OK (904 bytes)",
"pragma": "no-cache",
"replay-nonce": "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGH",
"server": "nginx",
"status": 200,
"strict-transport-security": "max-age=604800",
"url": "https://acme-v02.api.letsencrypt.org/acme/acct/46161",
"x-frame-options": "DENY"
}
output_text:
description: The raw text output
returned: always
type: str
sample: "{\\n \\\"id\\\": 12345,\\n \\\"key\\\": {\\n \\\"kty\\\": \\\"RSA\\\",\\n ..."
output_json:
description: The output parsed as JSON
returned: if output can be parsed as JSON
type: dict
sample:
- id: 12345
- key:
- kty: RSA
- ...
'''
from ansible.module_utils.acme import (
ModuleFailException, ACMEAccount, set_crypto_backend,
)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_bytes
import json
def main():
module = AnsibleModule(
argument_spec=dict(
account_key_src=dict(type='path', aliases=['account_key']),
account_key_content=dict(type='str', no_log=True),
account_uri=dict(type='str'),
acme_directory=dict(type='str', default='https://acme-staging.api.letsencrypt.org/directory'),
acme_version=dict(type='int', default=1, choices=[1, 2]),
validate_certs=dict(type='bool', default=True),
url=dict(type='str'),
method=dict(type='str', choices=['get', 'post', 'directory-only'], default='get'),
content=dict(type='str'),
fail_on_acme_error=dict(type='bool', default=True),
select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'openssl', 'cryptography']),
),
mutually_exclusive=(
['account_key_src', 'account_key_content'],
),
required_if=(
['method', 'get', ['url']],
['method', 'post', ['url', 'content']],
['method', 'get', ['account_key_src', 'account_key_content'], True],
['method', 'post', ['account_key_src', 'account_key_content'], True],
),
)
set_crypto_backend(module)
if not module.params.get('validate_certs'):
module.warn(warning='Disabling certificate validation for communications with ACME endpoint. ' +
'This should only be done for testing against a local ACME server for ' +
'development purposes, but *never* for production purposes.')
result = dict()
changed = False
try:
# Get hold of ACMEAccount object (includes directory)
account = ACMEAccount(module)
method = module.params['method']
result['directory'] = account.directory.directory
# Do we have to do more requests?
if method != 'directory-only':
url = module.params['url']
fail_on_acme_error = module.params['fail_on_acme_error']
# Do request
if method == 'get':
data, info = account.get_request(url, parse_json_result=False, fail_on_error=False)
elif method == 'post':
changed = True # only POSTs can change
data, info = account.send_signed_request(url, to_bytes(module.params['content']), parse_json_result=False, encode_payload=False)
# Update results
result.update(dict(
headers=info,
output_text=to_native(data),
))
# See if we can parse the result as JSON
try:
result['output_json'] = json.loads(data)
except Exception as dummy:
pass
# Fail if error was returned
if fail_on_acme_error and info['status'] >= 400:
raise ModuleFailException("ACME request failed: CODE: {0} RESULT: {1}".format(info['status'], data))
# Done!
module.exit_json(changed=changed, **result)
except ModuleFailException as e:
e.do_fail(module, **result)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,306,604,026,821,203,000 | 36.666667 | 144 | 0.642039 | false |
antamb/google-personal-assistant | src/actionbase.py | 2 | 2216 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle voice commands locally.
This code lets you link keywords to actions. The actions are declared in
action.py.
"""
class Actor(object):
"""Passes commands on to a list of action handlers."""
def __init__(self):
self.handlers = []
def add_keyword(self, keyword, action):
self.handlers.append(KeywordHandler(keyword, action))
def get_phrases(self):
"""Get a list of all phrases that are expected by the handlers."""
return [phrase for h in self.handlers for phrase in h.get_phrases()]
def can_handle(self, command):
"""Check if command is handled without running the handlers.
Returns True if the command would be handled."""
for handler in self.handlers:
if handler.can_handle(command):
return True
return False
def handle(self, command):
"""Pass command to handlers, stopping after one has handled the command.
Returns True if the command was handled."""
for handler in self.handlers:
if handler.handle(command):
return True
return False
class KeywordHandler(object):
"""Perform the action when the given keyword is in the command."""
def __init__(self, keyword, action):
self.keyword = keyword.lower()
self.action = action
def get_phrases(self):
return [self.keyword]
def can_handle(self, command):
return self.keyword in command.lower()
def handle(self, command):
if self.can_handle(command):
self.action.run(command)
return True
return False
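# Illustrative usage sketch (not part of the original module). The echo action
# below is hypothetical -- real actions are declared in action.py and only need
# a run(command) method.
if __name__ == '__main__':
    class _EchoAction(object):
        def run(self, command):
            print('handled: ' + command)
    actor = Actor()
    actor.add_keyword('what time is it', _EchoAction())
    print(actor.can_handle('hey, what time is it?'))  # True, nothing runs yet
    print(actor.handle('hey, what time is it?'))      # True, runs _EchoAction.run()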
| apache-2.0 | 5,235,013,703,083,971,000 | 28.546667 | 80 | 0.66065 | false |
mffrench/fabric | bddtests/steps/docgen.py | 5 | 13747 | # Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from StringIO import StringIO
from itertools import chain
from google.protobuf.message import Message
from b3j0f.aop import weave, unweave, is_intercepted, weave_on
from jinja2 import Environment, PackageLoader, select_autoescape, FileSystemLoader, Template
env = Environment(
loader=FileSystemLoader(searchpath="templates"),
autoescape=select_autoescape(['html', 'xml']),
trim_blocks=True,
lstrip_blocks=True
)
from bootstrap_util import getDirectory
class DocumentGenerator:
def __init__(self, contextHelper, scenario):
self.contextHelper = contextHelper
self.directory = getDirectory(contextHelper.context)
self.output = StringIO()
self.currentStep = 0
self.composition = None
#Weave advices into contextHelper
weave(target=self.contextHelper.before_step, advices=self.beforeStepAdvice)
weave(target=self.contextHelper.after_step, advices=self.afterStepAdvice)
weave(target=self.contextHelper.after_scenario, advices=self.afterScenarioAdvice)
weave(target=self.contextHelper.getBootrapHelper, advices=self.getBootstrapHelperAdvice)
weave(target=self.contextHelper.registerComposition, advices=self.registerCompositionAdvice)
# Weave advices into Directory
weave(target=self.directory._registerOrg, advices=self.registerOrgAdvice)
weave(target=self.directory._registerUser, advices=self.registerUserAdvice)
weave(target=self.directory.registerOrdererAdminTuple, advices=self.registerNamedNodeAdminTupleAdvice)
def beforeStepAdvice(self, joinpoint):
self.currentStep += 1
step = joinpoint.kwargs['step']
# Now the jinja template
self.output.write(env.get_template("html/step.html").render(step_id="Step {0}".format(self.currentStep), step=step))
return joinpoint.proceed()
def afterStepAdvice(self, joinpoint):
step = joinpoint.kwargs['step']
# Now the jinja template
if step.status=="failed":
self.output.write(env.get_template("html/error.html").render(err=step.error_message))
return joinpoint.proceed()
def compositionCallCLIAdvice(self, joinpoint):
        "This advice is called around the composition's usage of the CLI"
result = joinpoint.proceed()
# Create table for environment
composition = joinpoint.kwargs['self']
envAdditions = composition.getEnvAdditions()
keys = envAdditions.keys()
keys.sort()
envPreamble = " ".join(["{0}={1}".format(key,envAdditions[key]) for key in keys])
args= " ".join(joinpoint.kwargs['argList'])
self.output.write(env.get_template("html/cli.html").render(command="{0} {1}".format(envPreamble, args)))
return result
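    # _getNetworkGroup maps a docker-compose service name to a numeric group id
    # used to group nodes in the generated network graph; names matching none of
    # the known prefixes fall back to group 0.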
def _getNetworkGroup(self, serviceName):
groups = {"peer" : 1, "orderer" : 2, "kafka" : 7, "zookeeper" : 8, "couchdb" : 9}
groupId = 0
for group, id in groups.iteritems():
if serviceName.lower().startswith(group):
groupId = id
return groupId
def _getNetworkForConfig(self, configAsYaml):
import yaml
config = yaml.load(configAsYaml)
assert "services" in config, "Expected config from docker-compose config to have services key at top level: \n{0}".format(config)
network = {"nodes": [], "links" : []}
for serviceName in config['services'].keys():
network['nodes'].append({"id" : serviceName, "group" : self._getNetworkGroup(serviceName), "type" : "node"})
# Now get links
if "depends_on" in config['services'][serviceName]:
for dependedOnServiceName in config['services'][serviceName]['depends_on']:
network['links'].append({"source": serviceName, "target": dependedOnServiceName, "value" : 1})
return network
def _getNetworkForDirectory(self):
network = {"nodes":[], "links": []}
for orgName, org in self.directory.getOrganizations().iteritems():
network['nodes'].append({"id" : orgName, "group" : 3, "type" : "org"})
for userName, user in self.directory.getUsers().iteritems():
network['nodes'].append({"id" : userName, "group" : 4, "type" : "user"})
# Now get links
for nct, cert in self.directory.getNamedCtxTuples().iteritems():
nctId = "{0}-{1}-{2}".format(nct.user, nct.nodeName, nct.organization)
network['nodes'].append({"id" : nctId, "group" : 5, "type" : "cert"})
network['links'].append({"source": nctId, "target": nct.organization, "value" : 1})
network['links'].append({"source": nctId, "target": nct.user, "value" : 1})
# Only add the context link if it is a compose service, else the target may not exist.
if nct.nodeName in self.composition.getServiceNames():
network['links'].append({"source": nctId, "target": nct.nodeName, "value" : 1})
return network
def _writeNetworkJson(self):
if self.composition:
import json
configNetwork = self._getNetworkForConfig(configAsYaml=self.composition.getConfig())
directoryNetwork = self._getNetworkForDirectory()
# Join the network info together
fullNetwork = dict(chain([(key, configNetwork[key] + directoryNetwork[key]) for key in configNetwork.keys()]))
(fileName, fileExists) = self.contextHelper.getTmpPathForName("network", extension="json")
with open(fileName, "w") as f:
f.write(json.dumps(fullNetwork))
def registerCompositionAdvice(self, joinpoint):
composition = joinpoint.kwargs['composition']
weave(target=composition._callCLI, advices=self.compositionCallCLIAdvice)
result = joinpoint.proceed()
if composition:
#Now get the config for the composition and dump out.
self.composition = composition
configAsYaml = composition.getConfig()
self.output.write(env.get_template("html/header.html").render(text="Configuration", level=4))
self.output.write(env.get_template("html/cli.html").render(command=configAsYaml))
#Inject the graph
self.output.write(env.get_template("html/header.html").render(text="Network Graph", level=4))
self.output.write(env.get_template("html/graph.html").render())
return result
    def _addLinkToFile(self, fileName, linkText):
import ntpath
baseName = ntpath.basename(fileName)
# self.markdownWriter.addLink(linkUrl="./{0}".format(baseName), linkText=linkText, linkTitle=baseName)
def _getLinkInfoForFile(self, fileName):
import ntpath
return "./{0}".format(ntpath.basename(fileName))
def registerOrgAdvice(self, joinpoint):
orgName = joinpoint.kwargs['orgName']
newlyRegisteredOrg = joinpoint.proceed()
orgCert = newlyRegisteredOrg.getCertAsPEM()
#Write out key material
(fileName, fileExists) = self.contextHelper.getTmpPathForName(name="dir-org-{0}-cert".format(orgName), extension="pem")
with open(fileName, 'w') as f:
f.write(orgCert)
self._addLinkToFile(fileName=fileName, linkText="Public cert for Organization")
#Now the jinja output
self.output.write(env.get_template("html/org.html").render(org=newlyRegisteredOrg, cert_href=self._getLinkInfoForFile(fileName), path_to_cert=fileName))
return newlyRegisteredOrg
def registerUserAdvice(self, joinpoint):
userName = joinpoint.kwargs['userName']
newlyRegisteredUser = joinpoint.proceed()
#Write out key material
privateKeyAsPem = newlyRegisteredUser.getPrivateKeyAsPEM()
(fileName, fileExists) = self.contextHelper.getTmpPathForName(name="dir-user-{0}-privatekey".format(userName), extension="pem")
with open(fileName, 'w') as f:
f.write(privateKeyAsPem)
#Weave into user tags setting
weave(target=newlyRegisteredUser.setTagValue, advices=self.userSetTagValueAdvice)
#Now the jinja output
self.output.write(env.get_template("html/user.html").render(user=newlyRegisteredUser, private_key_href=self._getLinkInfoForFile(fileName)))
return newlyRegisteredUser
def afterScenarioAdvice(self, joinpoint):
scenario = joinpoint.kwargs['scenario']
#Render with jinja
header = env.get_template("html/scenario.html").render(scenario=scenario, steps=scenario.steps)
main = env.get_template("html/main.html").render(header=header, body=self.output.getvalue())
(fileName, fileExists) = self.contextHelper.getTmpPathForName("scenario", extension="html")
with open(fileName, 'w') as f:
f.write(main.encode("utf-8"))
self._writeNetworkJson()
return joinpoint.proceed()
def registerNamedNodeAdminTupleAdvice(self, joinpoint):
namedNodeAdminTuple = joinpoint.proceed()
directory = joinpoint.kwargs['self']
#jinja
newCertAsPEM = directory.getCertAsPEM(namedNodeAdminTuple)
self.output.write(env.get_template("html/header.html").render(text="Created new named node admin tuple: {0}".format(namedNodeAdminTuple), level=4))
self.output.write(env.get_template("html/cli.html").render(command=newCertAsPEM))
#Write cert out
fileNameTocheck = "dir-user-{0}-cert-{1}-{2}".format(namedNodeAdminTuple.user, namedNodeAdminTuple.nodeName, namedNodeAdminTuple.organization)
(fileName, fileExists) = self.contextHelper.getTmpPathForName(fileNameTocheck, extension="pem")
with open(fileName, 'w') as f:
f.write(newCertAsPEM)
return namedNodeAdminTuple
def bootstrapHelperSignConfigItemAdvice(self, joinpoint):
configItem = joinpoint.kwargs['configItem']
#jinja
self.output.write(env.get_template("html/header.html").render(text="Dumping signed config item...", level=4))
self.output.write(env.get_template("html/protobuf.html").render(msg=configItem, msgLength=len(str(configItem))))
signedConfigItem = joinpoint.proceed()
return signedConfigItem
def getBootstrapHelperAdvice(self, joinpoint):
bootstrapHelper = joinpoint.proceed()
weave(target=bootstrapHelper.signConfigItem, advices=self.bootstrapHelperSignConfigItemAdvice)
return bootstrapHelper
def _isProtobufMessage(self, target):
return isinstance(target, Message)
def _isListOfProtobufMessages(self, target):
result = False
if isinstance(target, list):
messageList = [item for item in target if self._isProtobufMessage(item)]
result = len(messageList) == len(target)
return result
def _isDictOfProtobufMessages(self, target):
result = False
if isinstance(target, dict):
messageList = [item for item in target.values() if self._isProtobufMessage(item)]
result = len(messageList) == len(target)
return result
def _writeProtobuf(self, fileName, msg):
import ntpath
baseName = ntpath.basename(fileName)
dataToWrite = msg.SerializeToString()
with open("{0}".format(fileName), 'wb') as f:
f.write(dataToWrite)
self.output.write(env.get_template("html/protobuf.html").render(id=baseName, msg=msg, path_to_protobuf=fileName, msgLength=len(dataToWrite),linkUrl="./{0}".format(baseName), linkText="Protobuf message in binary form", linkTitle=baseName))
def userSetTagValueAdvice(self, joinpoint):
result = joinpoint.proceed()
user = joinpoint.kwargs['self']
tagKey = joinpoint.kwargs['tagKey']
tagValue = joinpoint.kwargs['tagValue']
#jinja invoke
self.output.write(env.get_template("html/tag.html").render(user=user, tag_key=tagKey))
# If protobuf message, write out in binary form
if self._isProtobufMessage(tagValue):
import ntpath
(fileName, fileExists) = self.contextHelper.getTmpPathForName("{0}-{1}".format(user.getUserName(), tagKey), extension="protobuf")
self._writeProtobuf(fileName=fileName, msg=tagValue)
# If protobuf message, write out in binary form
elif self._isListOfProtobufMessages(tagValue):
index = 0
for msg in tagValue:
(fileName, fileExists) = self.contextHelper.getTmpPathForName("{0}-{1}-{2:0>4}".format(user.getUserName(), tagKey, index), extension="protobuf")
self._writeProtobuf(fileName=fileName, msg=msg)
index += 1
elif self._isDictOfProtobufMessages(tagValue):
for key,msg in tagValue.iteritems():
(fileName, fileExists) = self.contextHelper.getTmpPathForName("{0}-{1}-{2}".format(user.getUserName(), tagKey, key), extension="protobuf")
self._writeProtobuf(fileName=fileName, msg=msg)
else:
self.output.write(env.get_template("html/cli.html").render(command=str(tagValue)))
return result | apache-2.0 | -36,151,572,556,608,720 | 48.992727 | 246 | 0.669673 | false |
martynovp/edx-platform | common/test/acceptance/tests/studio/test_studio_acid_xblock.py | 130 | 6909 | """
Acceptance tests for Studio related to the acid xblock.
"""
from bok_choy.web_app_test import WebAppTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.xblock.acid import AcidView
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class XBlockAcidBase(WebAppTest):
"""
Base class for tests that verify that XBlock integration is working correctly
"""
__test__ = False
def setUp(self):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(XBlockAcidBase, self).setUp()
# Define a unique course identifier
self.course_info = {
'org': 'test_org',
'number': 'course_' + self.unique_id[:5],
'run': 'test_' + self.unique_id,
'display_name': 'Test Course ' + self.unique_id
}
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_id = '{org}.{number}.{run}'.format(**self.course_info)
self.setup_fixtures()
self.auth_page = AutoAuthPage(
self.browser,
staff=False,
username=self.user.get('username'),
email=self.user.get('email'),
password=self.user.get('password')
)
self.auth_page.visit()
def validate_acid_block_preview(self, acid_block):
"""
Validate the Acid Block's preview
"""
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('user_state'))
self.assertTrue(acid_block.scope_passed('user_state_summary'))
self.assertTrue(acid_block.scope_passed('preferences'))
self.assertTrue(acid_block.scope_passed('user_info'))
def test_acid_block_preview(self):
"""
Verify that all expected acid block tests pass in studio preview
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.expand_subsection().unit('Test Unit').go_to()
acid_block = AcidView(self.browser, unit.xblocks[0].preview_selector)
self.validate_acid_block_preview(acid_block)
def test_acid_block_editor(self):
"""
Verify that all expected acid block tests pass in studio editor
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.expand_subsection().unit('Test Unit').go_to()
acid_block = AcidView(self.browser, unit.xblocks[0].edit().editor_selector)
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
class XBlockAcidNoChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with no children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
self.user = course_fix.user
class XBlockAcidParentBase(XBlockAcidBase):
"""
Base class for tests that verify that parent XBlock integration is working correctly
"""
__test__ = False
def validate_acid_block_preview(self, acid_block):
super(XBlockAcidParentBase, self).validate_acid_block_preview(acid_block)
self.assertTrue(acid_block.child_tests_passed)
def test_acid_block_preview(self):
"""
Verify that all expected acid block tests pass in studio preview
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.expand_subsection().unit('Test Unit').go_to()
container = unit.xblocks[0].go_to_container()
acid_block = AcidView(self.browser, container.xblocks[0].preview_selector)
self.validate_acid_block_preview(acid_block)
class XBlockAcidEmptyParentTest(XBlockAcidParentBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
)
)
)
)
).install()
self.user = course_fix.user
class XBlockAcidChildTest(XBlockAcidParentBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
)
)
)
)
).install()
self.user = course_fix.user
def test_acid_block_preview(self):
super(XBlockAcidChildTest, self).test_acid_block_preview()
def test_acid_block_editor(self):
super(XBlockAcidChildTest, self).test_acid_block_editor()
| agpl-3.0 | 7,982,573,496,535,149,000 | 32.538835 | 104 | 0.590389 | false |
rasata/ansible | lib/ansible/plugins/filter/core.py | 10 | 9635 | # (c) 2012, Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import sys
import base64
import itertools
import json
import os.path
import ntpath
import types
import pipes
import glob
import re
import crypt
import hashlib
import string
from functools import partial
import operator as py_operator
from random import SystemRandom, shuffle
import uuid
import yaml
from jinja2.filters import environmentfilter
from distutils.version import LooseVersion, StrictVersion
from six import iteritems
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap, to_unicode
from ansible.utils.vars import merge_hash
try:
import passlib.hash
HAS_PASSLIB = True
except:
HAS_PASSLIB = False
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
def to_yaml(a, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw)
return to_unicode(transformed)
def to_nice_yaml(a, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=4, allow_unicode=True, default_flow_style=False, **kw)
return to_unicode(transformed)
def to_json(a, *args, **kw):
''' Convert the value to JSON '''
return json.dumps(a, *args, **kw)
def to_nice_json(a, *args, **kw):
'''Make verbose, human readable JSON'''
# python-2.6's json encoder is buggy (can't encode hostvars)
if sys.version_info < (2, 7):
try:
import simplejson
except ImportError:
pass
else:
try:
major = int(simplejson.__version__.split('.')[0])
except:
pass
else:
if major >= 2:
return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw)
# Fallback to the to_json filter
return to_json(a, *args, **kw)
return json.dumps(a, indent=4, sort_keys=True, *args, **kw)
def bool(a):
''' return a bool for the arg '''
if a is None or type(a) == bool:
return a
if type(a) in types.StringTypes:
a = a.lower()
if a in ['yes', 'on', '1', 'true', 1]:
return True
else:
return False
def quote(a):
''' return its argument quoted for shell usage '''
return pipes.quote(a)
def fileglob(pathname):
''' return list of matched files for glob '''
return glob.glob(pathname)
def regex_replace(value='', pattern='', replacement='', ignorecase=False):
''' Perform a `re.sub` returning a string '''
if not isinstance(value, basestring):
value = str(value)
if ignorecase:
flags = re.I
else:
flags = 0
_re = re.compile(pattern, flags=flags)
return _re.sub(replacement, value)
def ternary(value, true_val, false_val):
''' value ? true_val : false_val '''
if value:
return true_val
else:
return false_val
def version_compare(value, version, operator='eq', strict=False):
''' Perform a version comparison on a value '''
op_map = {
'==': 'eq', '=': 'eq', 'eq': 'eq',
'<': 'lt', 'lt': 'lt',
'<=': 'le', 'le': 'le',
'>': 'gt', 'gt': 'gt',
'>=': 'ge', 'ge': 'ge',
'!=': 'ne', '<>': 'ne', 'ne': 'ne'
}
if strict:
Version = StrictVersion
else:
Version = LooseVersion
if operator in op_map:
operator = op_map[operator]
else:
raise errors.AnsibleFilterError('Invalid operator type')
try:
method = getattr(py_operator, operator)
return method(Version(str(value)), Version(str(version)))
except Exception as e:
raise errors.AnsibleFilterError('Version comparison: %s' % e)
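# Illustrative template usage of version_compare (values are made up):
#   {{ ansible_distribution_version | version_compare('12.04', '>=') }}  ->  True/False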
def regex_escape(string):
'''Escape all regular expressions special characters from STRING.'''
return re.escape(string)
@environmentfilter
def rand(environment, end, start=None, step=None):
r = SystemRandom()
if isinstance(end, (int, long)):
if not start:
start = 0
if not step:
step = 1
return r.randrange(start, end, step)
elif hasattr(end, '__iter__'):
if start or step:
raise errors.AnsibleFilterError('start and step can only be used with integer values')
return r.choice(end)
else:
raise errors.AnsibleFilterError('random can only be used on sequences and integers')
def randomize_list(mylist):
try:
mylist = list(mylist)
shuffle(mylist)
except:
pass
return mylist
def get_hash(data, hashtype='sha1'):
try: # see if hash is supported
h = hashlib.new(hashtype)
except:
return None
h.update(data)
return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None):
# TODO: find a way to construct dynamically from system
cryptmethod= {
'md5': '1',
'blowfish': '2a',
'sha256': '5',
'sha512': '6',
}
    hashtype = hashtype.lower()
if hashtype in cryptmethod:
if salt is None:
r = SystemRandom()
salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)])
if not HAS_PASSLIB:
if sys.platform.startswith('darwin'):
raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin')
saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
encrypted = crypt.crypt(password, saltstring)
else:
cls = getattr(passlib.hash, '%s_crypt' % hashtype)
encrypted = cls.encrypt(password, salt=salt)
return encrypted
return None
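# Illustrative template usage of password_hash (output is schematic, not a real hash):
#   {{ 'secret' | password_hash('sha512') }}  ->  '$6$<16-char salt>$<digest>'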
def to_uuid(string):
return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
def mandatory(a):
from jinja2.runtime import Undefined
''' Make a variable mandatory '''
if isinstance(a, Undefined):
raise errors.AnsibleFilterError('Mandatory variable not defined.')
return a
def combine(*terms, **kwargs):
recursive = kwargs.get('recursive', False)
if len(kwargs) > 1 or (len(kwargs) == 1 and 'recursive' not in kwargs):
raise errors.AnsibleFilterError("'recursive' is the only valid keyword argument")
for t in terms:
if not isinstance(t, dict):
raise errors.AnsibleFilterError("|combine expects dictionaries, got " + repr(t))
if recursive:
return reduce(merge_hash, terms)
else:
return dict(itertools.chain(*map(iteritems, terms)))
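# Illustrative template usage of combine:
#   {{ {'a': 1} | combine({'b': 2}) }}                    ->  {'a': 1, 'b': 2}
#   {{ defaults | combine(overrides, recursive=True) }}   ->  deep merge via merge_hash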
class FilterModule(object):
''' Ansible core jinja2 filters '''
def filters(self):
return {
# base 64
'b64decode': partial(unicode_wrap, base64.b64decode),
'b64encode': partial(unicode_wrap, base64.b64encode),
# uuid
'to_uuid': to_uuid,
# json
'to_json': to_json,
'to_nice_json': to_nice_json,
'from_json': json.loads,
# yaml
'to_yaml': to_yaml,
'to_nice_yaml': to_nice_yaml,
'from_yaml': yaml.safe_load,
# path
'basename': partial(unicode_wrap, os.path.basename),
'dirname': partial(unicode_wrap, os.path.dirname),
'expanduser': partial(unicode_wrap, os.path.expanduser),
'realpath': partial(unicode_wrap, os.path.realpath),
'relpath': partial(unicode_wrap, os.path.relpath),
'splitext': partial(unicode_wrap, os.path.splitext),
'win_basename': partial(unicode_wrap, ntpath.basename),
'win_dirname': partial(unicode_wrap, ntpath.dirname),
'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
# value as boolean
'bool': bool,
# quote string for shell usage
'quote': quote,
# hash filters
# md5 hex digest of string
'md5': md5s,
# sha1 hex digeset of string
'sha1': checksum_s,
# checksum of string as used by ansible for checksuming files
'checksum': checksum_s,
# generic hashing
'password_hash': get_encrypted_password,
'hash': get_hash,
# file glob
'fileglob': fileglob,
# regex
'regex_replace': regex_replace,
'regex_escape': regex_escape,
# ? : ;
'ternary': ternary,
# list
# version comparison
'version_compare': version_compare,
# random stuff
'random': rand,
'shuffle': randomize_list,
# undefined
'mandatory': mandatory,
# merge dicts
'combine': combine,
}
| gpl-3.0 | 8,999,721,198,015,210,000 | 28.829721 | 147 | 0.601661 | false |
apark263/tensorflow | tensorflow/python/keras/optimizer_v2/ftrl_test.py | 13 | 17873 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(test.TestCase):
def doTestFtrlwithoutRegularization(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]), v1_val)
@test_util.run_deprecated_v1
def testFtrlWithoutRegularization(self):
self.doTestFtrlwithoutRegularization(use_resource=False)
@test_util.run_deprecated_v1
def testResourceFtrlWithoutRegularization(self):
self.doTestFtrlwithoutRegularization(use_resource=True)
@test_util.run_deprecated_v1
def testFtrlwithoutRegularization2(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.55607247, -3.98729396]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28232238, -0.56096673]), v1_val)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
sgd_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0])
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[0, 1]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testFtrlWithL1(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]), v1_val)
@test_util.run_deprecated_v1
def testFtrlWithL1_L2(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.24059935, -0.46829352]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.02406147, -0.04830509]), v1_val)
@test_util.run_deprecated_v1
def testFtrlWithL1_L2_L2Shrinkage(self):
"""Test the new FTRL op with support for l2 shrinkage.
The addition of this parameter which places a constant pressure on weights
towards the origin causes the gradient descent trajectory to differ. The
weights will tend to have smaller magnitudes with this parameter set.
"""
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.22578995, -0.44345796]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.14378493, -0.13229476]), v1_val)
@test_util.run_deprecated_v1
def testFtrlWithL1_L2_L2ShrinkageSparse(self):
"""Tests the new FTRL op with support for l2 shrinkage on sparse grads."""
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[4.0], [3.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]), constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
constant_op.constant([1]), constant_op.constant([2, 1]))
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)
self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([[-0.22578995], [2.]], v0_val)
self.assertAllCloseAccordingToType([[4.], [-0.13229476]], v1_val)
@test_util.run_deprecated_v1
def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self):
"""Verifies that l2 shrinkage in FTRL does not change lr schedule."""
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([1.0, 2.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.1, 0.2], dtype=dtype)
opt0 = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
opt1 = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update0 = opt0.apply_gradients([(grads0, var0)])
update1 = opt1.apply_gradients([(grads1, var1)])
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([1.0, 2.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update0.run()
update1.run()
v0_val, v1_val = self.evaluate([var0, var1])
# var0 is experiencing L2 shrinkage so it should be smaller than var1
# in magnitude.
self.assertTrue((v0_val**2 < v1_val**2).all())
accum0 = sess.run(opt0.get_slot(var0, "accumulator"))
accum1 = sess.run(opt1.get_slot(var1, "accumulator"))
# L2 shrinkage should not change how we update grad accumulator.
self.assertAllCloseAccordingToType(accum0, accum1)
def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
if is_sparse:
var0 = variables.Variable([[0.0], [0.0]], dtype=dtype)
var1 = variables.Variable([[0.0], [0.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]), constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
constant_op.constant([1]), constant_op.constant([2, 1]))
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
sess = ops.get_default_session()
v0_val, v1_val = self.evaluate([var0, var1])
if is_sparse:
self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val)
self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val)
else:
self.assertAllCloseAccordingToType([0.0, 0.0], v0_val)
self.assertAllCloseAccordingToType([0.0, 0.0], v1_val)
# Run Ftrl for a few steps
for _ in range(steps):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
return v0_val, v1_val
# When variables are initialized with Zero, FTRL-Proximal has two properties:
# 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical
# with GradientDescent.
# 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is identical
# with Adagrad.
# So, basing on these two properties, we test if our implementation of
# FTRL-Proximal performs same updates as Adagrad or GradientDescent.
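  # Each applyOptimizer() call below runs a fixed number of update steps (5 by
  # default) on two small variables and returns their final values, so the
  # assertAllClose checks compare end states rather than per-step updates.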
@test_util.run_deprecated_v1
def testEquivAdagradwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.Ftrl(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.cached_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
@test_util.run_deprecated_v1
def testEquivSparseAdagradwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.Ftrl(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.cached_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
dtype,
is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
@test_util.run_deprecated_v1
def testEquivSparseGradientDescentwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.Ftrl(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.cached_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0),
dtype,
is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
@test_util.run_deprecated_v1
def testEquivGradientDescentwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.Ftrl(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.cached_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0), dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
if __name__ == "__main__":
test.main()
| apache-2.0 | -4,569,386,590,583,247,000 | 39.345372 | 118 | 0.625133 | false |
muffinresearch/olympia | scripts/siege.py | 24 | 2862 | """
A script for generating siege files with a bunch of URL variations.
"""
import re
import sys
part_re = re.compile(r'\{([-\w]+)\}')
AMO_LANGUAGES = (
'af', 'ar', 'ca', 'cs', 'da', 'de', 'el', 'en-US', 'es', 'eu', 'fa', 'fi',
'fr', 'ga-IE', 'he', 'hu', 'id', 'it', 'ja', 'ko', 'mn', 'nl', 'pl',
'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sq', 'sr', 'sv-SE', 'uk', 'vi',
'zh-CN', 'zh-TW',
)
config = {
'base': [],
'locale': AMO_LANGUAGES,
'app': ['firefox'],
'extension-slug': [''] + """
alerts-and-updates appearance bookmarks download-management
feeds-news-blogging language-support photos-music-videos
privacy-security social-communication tabs toolbars web-development
other""".split(),
'theme-slug': [''] + """
animals compact large miscellaneous modern nature os-integration retro
sports""".split(),
'theme-sort': 'name updated created downloads rating'.split(),
'page': '1 2'.split(),
'exp': 'on off'.split(),
'personas-slug': [''] + """
abstract causes fashion firefox foxkeh holiday music nature other
scenery seasonal solid sports websites""".split(),
'personas-sort': """up-and-coming created popular rating""".split()
}
root = '{base}/{locale}/{app}'
templates = t = {
'root': '/',
'extensions': '/extensions/{extension-slug}/',
'language-tools': '/language-tools',
'themes': '/themes/{theme-slug}?sort={theme-sort}&page={page}',
'personas': '/personas/{personas-slug}',
}
t['themes-unreviewed'] = t['themes'] + '&unreviewed={exp}'
t['personas-sort'] = t['personas'] + '?sort={personas-sort}'
t['extensions-sort'] = t['extensions'] + '?sort={theme-sort}'
t['extensions-featured'] = t['extensions'] + 'featured'
for key, value in templates.items():
templates[key] = root + value
def combos(s, parts):
def _rec(s, parts, kw):
key, rest = parts[0], parts[1:]
rv = []
for opt in config[key]:
kw[key] = opt
if not rest:
rv.append(s.format(**kw))
else:
rv.extend(_rec(s, rest, kw))
return rv
return _rec(s, parts, {})
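# For example, combos('{locale}/{app}', ['locale', 'app']) yields one string per
# (locale, app) pair drawn from the config dict above; gen() applies this to
# every chosen template after part_re extracts its {placeholders}.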
def gen(choices=templates):
rv = []
for template in choices:
parts = part_re.findall(template)
rv.extend(combos(template, parts))
return rv
def main():
args = sys.argv
try:
base, choices = sys.argv[1], args[2:] or templates.keys()
except IndexError:
print 'Usage: python siege.py <BASE> [%s]' % (', '.join(templates))
print '\nBASE should be something like "http://localhost:8000/z".'
print 'The remaining arguments are names of url templates.'
sys.exit(1)
config['base'] = [base.rstrip('/')]
print '\n'.join(gen(templates[k] for k in choices))
if __name__ == '__main__':
main()
| bsd-3-clause | 2,501,134,686,683,015,700 | 28.505155 | 78 | 0.566038 | false |
fredericgermain/linux-sunxi | tools/perf/util/setup.py | 989 | 1543 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPIKFS')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 | -3,433,588,358,836,454,000 | 31.145833 | 82 | 0.666883 | false |
ritchiewilson/majormajor | tests/majormajor_tests/test_majormajor_helpers.py | 1 | 1120 | # MajorMajor - Collaborative Document Editing Library
# Copyright (C) 2013 Ritchie Wilson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from majormajor.majormajor import MajorMajor
from majormajor.document import Document
class TestMajorMajorHelpers:
def setup_method(self, method):
self.collab0 = MajorMajor()
def test_new_document(self):
# leaving nothing specified
doc = self.collab0.new_document()
assert isinstance(doc, Document)
assert doc.get_snapshot() == {}
| gpl-3.0 | -2,733,914,405,155,297,000 | 34 | 71 | 0.733929 | false |
yilei0620/3D_Conditional_Gan | lib/data_utils.py | 1 | 1596 | import numpy as np
from sklearn import utils as skutils
from rng import np_rng, py_rng
def center_crop(x, ph, pw=None):
if pw is None:
pw = ph
h, w = x.shape[:2]
j = int(round((h - ph)/2.))
i = int(round((w - pw)/2.))
return x[j:j+ph, i:i+pw]
def patch(x, ph, pw=None):
if pw is None:
pw = ph
h, w = x.shape[:2]
j = py_rng.randint(0, h-ph)
i = py_rng.randint(0, w-pw)
x = x[j:j+ph, i:i+pw]
return x
def list_shuffle(*data):
idxs = np_rng.permutation(np.arange(len(data[0])))
if len(data) == 1:
return [data[0][idx] for idx in idxs]
else:
return [[d[idx] for idx in idxs] for d in data]
def shuffle(*arrays, **options):
if isinstance(arrays[0][0], basestring):
return list_shuffle(*arrays)
else:
return skutils.shuffle(*arrays, random_state=np_rng)
def OneHot(X, n=None, negative_class=0.):
X = np.asarray(X).flatten()
if n is None:
n = np.max(X) + 1
Xoh = np.ones((len(X), n)) * negative_class
Xoh[np.arange(len(X)), X] = 1.
return Xoh
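# e.g. OneHot([0, 2], n=3) -> [[1., 0., 0.], [0., 0., 1.]]
# iter_data below yields up to `size` samples per batch; with multiple arrays it
# indexes each one with the caller-supplied shuffle_index permutation.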
def iter_data(*data, **kwargs):
size = kwargs.get('size', 128)
n = kwargs.get('ndata',0)
sIndex = kwargs.get('shuffle_index',[])
batches = n / size
if n % size != 0:
batches += 1
for b in range(batches):
start = b * size
end = (b + 1) * size
if end > n:
end = n
if len(data) == 1:
yield data[0][start:end]
else:
# print sIndex[start:end]
yield tuple([d[sIndex[start:end]] for d in data])
| mit | 2,082,431,232,394,358,500 | 23.553846 | 61 | 0.536341 | false |
Changaco/oh-mainline | vendor/packages/scrapy/scrapy/utils/project.py | 19 | 1474 | from os.path import join, dirname, abspath, isabs, exists
from os import makedirs, environ
import warnings
from scrapy.utils.conf import closest_scrapy_cfg, get_config
from scrapy.utils.python import is_writable
from scrapy.exceptions import NotConfigured
DATADIR_CFG_SECTION = 'datadir'
def inside_project():
scrapy_module = environ.get('SCRAPY_SETTINGS_MODULE')
if scrapy_module is not None:
try:
__import__(scrapy_module)
except ImportError:
warnings.warn("Cannot import scrapy settings module %s" % scrapy_module)
else:
return True
return bool(closest_scrapy_cfg())
def project_data_dir(project='default'):
"""Return the current project data dir, creating it if it doesn't exist"""
if not inside_project():
raise NotConfigured("Not inside a project")
cfg = get_config()
if cfg.has_option(DATADIR_CFG_SECTION, project):
d = cfg.get(DATADIR_CFG_SECTION, project)
else:
scrapy_cfg = closest_scrapy_cfg()
if not scrapy_cfg:
raise NotConfigured("Unable to find scrapy.cfg file to infer project data dir")
d = abspath(join(dirname(scrapy_cfg), '.scrapy'))
if not exists(d):
makedirs(d)
return d
def data_path(path):
"""If path is relative, return the given path inside the project data dir,
otherwise return the path unmodified
"""
return path if isabs(path) else join(project_data_dir(), path)
| agpl-3.0 | 1,417,129,636,767,579,000 | 34.095238 | 91 | 0.672999 | false |
Worldify/Worldify | worldify/config.py | 1 | 1709 | import os
from ConfigParser import ConfigParser
from .exceptions import WorldifyConfigException
class WorldifyConfig(object):
def __init__(self):
self._config_path = os.path.expanduser("~/.worldify")
self.conf = ConfigParser()
self.conf.read(self._config_path)
self._check_config_contents()
self._create_config_objects()
def _check_config_exsists(self):
if not os.path.exists(self._config_path):
raise WorldifyConfigException("No config file found at {0}".format(self._config_path))
return True
def _check_config_contents(self):
expected_config = {
"twitter": ['customer_key', 'customer_secret', 'access_key', 'access_secret'],
"recptiviti": ['api_key', 'api_secret'],
"spotify": ['user_id', 'user_oath', 'client_id', 'client_secret']
}
for key in expected_config:
if not self.conf.has_section(key):
raise WorldifyConfigException("Could not find the {} section in the worldify "
"config file.".format(key))
for option in expected_config[key]:
if not self.conf.has_option(key, option):
raise WorldifyConfigException("Could not find the {0}.{1} option in the "
"worldify config file".format(key, option))
def _create_config_objects(self):
self.twitter = {item[0]: item[1] for item in self.conf.items("twitter")}
self.recptiviti = {item[0]: item[1] for item in self.conf.items("recptiviti")}
self.spotify = {item[0]: item[1] for item in self.conf.items("spotify")}
| gpl-3.0 | -342,856,834,993,548,000 | 42.820513 | 98 | 0.587478 | false |
agrista/odoo-saas | addons/base_geolocalize/models/res_partner.py | 239 | 3743 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013_Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
import simplejson as json
except ImportError:
import json # noqa
import urllib
from openerp.osv import osv, fields
from openerp import tools
from openerp.tools.translate import _
def geo_find(addr):
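    """Geocode an address with the Google Maps API, returning (latitude, longitude) or None."""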
url = 'https://maps.googleapis.com/maps/api/geocode/json?sensor=false&address='
url += urllib.quote(addr.encode('utf8'))
try:
result = json.load(urllib.urlopen(url))
except Exception, e:
raise osv.except_osv(_('Network error'),
_('Cannot contact geolocation servers. Please make sure that your internet connection is up and running (%s).') % e)
if result['status'] != 'OK':
return None
try:
geo = result['results'][0]['geometry']['location']
return float(geo['lat']), float(geo['lng'])
except (KeyError, ValueError):
return None
def geo_query_address(street=None, zip=None, city=None, state=None, country=None):
if country and ',' in country and (country.endswith(' of') or country.endswith(' of the')):
# put country qualifier in front, otherwise GMap gives wrong results,
# e.g. 'Congo, Democratic Republic of the' => 'Democratic Republic of the Congo'
country = '{1} {0}'.format(*country.split(',', 1))
return tools.ustr(', '.join(filter(None, [street,
("%s %s" % (zip or '', city or '')).strip(),
state,
country])))
class res_partner(osv.osv):
_inherit = "res.partner"
_columns = {
'partner_latitude': fields.float('Geo Latitude', digits=(16, 5)),
'partner_longitude': fields.float('Geo Longitude', digits=(16, 5)),
'date_localization': fields.date('Geo Localization Date'),
}
def geo_localize(self, cr, uid, ids, context=None):
# Don't pass context to browse()! We need country names in english below
for partner in self.browse(cr, uid, ids):
if not partner:
continue
result = geo_find(geo_query_address(street=partner.street,
zip=partner.zip,
city=partner.city,
state=partner.state_id.name,
country=partner.country_id.name))
if result:
self.write(cr, uid, [partner.id], {
'partner_latitude': result[0],
'partner_longitude': result[1],
'date_localization': fields.date.context_today(self, cr, uid, context=context)
}, context=context)
return True
| agpl-3.0 | -636,352,200,834,316,500 | 41.534091 | 145 | 0.555971 | false |
sogis/Quantum-GIS | python/ext-libs/pygments/lexers/_clbuiltins.py | 370 | 14015 | # -*- coding: utf-8 -*-
"""
pygments.lexers._clbuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~
ANSI Common Lisp builtins.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
BUILTIN_FUNCTIONS = [ # 638 functions
'<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
'adjustable-array-p', 'adjust-array', 'allocate-instance',
'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos',
'apropos-list', 'aref', 'arithmetic-error-operands',
'arithmetic-error-operation', 'array-dimension', 'array-dimensions',
'array-displacement', 'array-element-type', 'array-has-fill-pointer-p',
'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index',
'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if',
'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1',
'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not',
'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole',
'both-case-p', 'boundp', 'break', 'broadcast-stream-streams',
'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr',
'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<',
'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character',
'characterp', 'char-code', 'char-downcase', 'char-equal',
'char-greaterp', 'char-int', 'char-lessp', 'char-name',
'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase',
'cis', 'class-name', 'class-of', 'clear-input', 'clear-output',
'close', 'clrhash', 'code-char', 'coerce', 'compile',
'compiled-function-p', 'compile-file', 'compile-file-pathname',
'compiler-macro-function', 'complement', 'complex', 'complexp',
'compute-applicable-methods', 'compute-restarts', 'concatenate',
'concatenated-stream-streams', 'conjugate', 'cons', 'consp',
'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list',
'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure',
'copy-symbol', 'copy-tree', 'cos', 'cosh', 'count', 'count-if',
'count-if-not', 'decode-float', 'decode-universal-time', 'delete',
'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not',
'delete-package', 'denominator', 'deposit-field', 'describe',
'describe-object', 'digit-char', 'digit-char-p', 'directory',
'directory-namestring', 'disassemble', 'documentation', 'dpb',
'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream',
'ed', 'eighth', 'elt', 'encode-universal-time', 'endp',
'enough-namestring', 'ensure-directories-exist',
'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error',
'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp',
'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author',
'file-error-pathname', 'file-length', 'file-namestring',
'file-position', 'file-string-length', 'file-write-date',
'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class',
'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart',
'find-symbol', 'finish-output', 'first', 'float', 'float-digits',
'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor',
'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line',
'fround', 'ftruncate', 'funcall', 'function-keywords',
'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp',
'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf',
'gethash', 'get-internal-real-time', 'get-internal-run-time',
'get-macro-character', 'get-output-stream-string', 'get-properties',
'get-setf-expansion', 'get-universal-time', 'graphic-char-p',
'hash-table-count', 'hash-table-p', 'hash-table-rehash-size',
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
'host-namestring', 'identity', 'imagpart', 'import',
'initialize-instance', 'input-stream-p', 'inspect',
'integer-decode-float', 'integer-length', 'integerp',
'interactive-stream-p', 'intern', 'intersection',
'invalid-method-error', 'invoke-debugger', 'invoke-restart',
'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm',
'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type',
'lisp-implementation-version', 'list', 'list*', 'list-all-packages',
'listen', 'list-length', 'listp', 'load',
'load-logical-pathname-translations', 'log', 'logand', 'logandc1',
'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname',
'logical-pathname-translations', 'logior', 'lognand', 'lognor',
'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name',
'lower-case-p', 'machine-instance', 'machine-type', 'machine-version',
'macroexpand', 'macroexpand-1', 'macro-function', 'make-array',
'make-broadcast-stream', 'make-concatenated-stream', 'make-condition',
'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table',
'make-instance', 'make-instances-obsolete', 'make-list',
'make-load-form', 'make-load-form-saving-slots', 'make-package',
'make-pathname', 'make-random-state', 'make-sequence', 'make-string',
'make-string-input-stream', 'make-string-output-stream', 'make-symbol',
'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map',
'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl',
'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not',
'merge', 'merge-pathnames', 'method-combination-error',
'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod',
'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc',
'next-method-p', 'nintersection', 'ninth', 'no-applicable-method',
'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse',
'nset-difference', 'nset-exclusive-or', 'nstring-capitalize',
'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if',
'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not',
'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp',
'open', 'open-stream-p', 'output-stream-p', 'package-error-package',
'package-name', 'package-nicknames', 'packagep',
'package-shadowing-symbols', 'package-used-by-list', 'package-use-list',
'pairlis', 'parse-integer', 'parse-namestring', 'pathname',
'pathname-device', 'pathname-directory', 'pathname-host',
'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type',
'pathname-version', 'peek-char', 'phase', 'plusp', 'position',
'position-if', 'position-if-not', 'pprint', 'pprint-dispatch',
'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline',
'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ',
'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim',
'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if',
'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read',
'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list',
'read-from-string', 'read-line', 'read-preserving-whitespace',
'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart',
'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove',
'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method',
'remprop', 'rename-file', 'rename-package', 'replace', 'require',
'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round',
'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar',
'search', 'second', 'set', 'set-difference',
'set-dispatch-macro-character', 'set-exclusive-or',
'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char',
'seventh', 'shadow', 'shadowing-import', 'shared-initialize',
'short-site-name', 'signal', 'signum', 'simple-bit-vector-p',
'simple-condition-format-arguments', 'simple-condition-format-control',
'simple-string-p', 'simple-vector-p', 'sin', 'sinh', 'sixth', 'sleep',
'slot-boundp', 'slot-exists-p', 'slot-makunbound', 'slot-missing',
'slot-unbound', 'slot-value', 'software-type', 'software-version',
'some', 'sort', 'special-operator-p', 'sqrt', 'stable-sort',
'standard-char-p', 'store-value', 'stream-element-type',
'stream-error-stream', 'stream-external-format', 'streamp', 'string',
'string<', 'string<=', 'string=', 'string>', 'string>=', 'string/=',
'string-capitalize', 'string-downcase', 'string-equal',
'string-greaterp', 'string-left-trim', 'string-lessp',
'string-not-equal', 'string-not-greaterp', 'string-not-lessp',
'stringp', 'string-right-trim', 'string-trim', 'string-upcase',
'sublis', 'subseq', 'subsetp', 'subst', 'subst-if', 'subst-if-not',
'substitute', 'substitute-if', 'substitute-if-not', 'subtypep','svref',
'sxhash', 'symbol-function', 'symbol-name', 'symbolp', 'symbol-package',
'symbol-plist', 'symbol-value', 'synonym-stream-symbol', 'syntax:',
'tailp', 'tan', 'tanh', 'tenth', 'terpri', 'third',
'translate-logical-pathname', 'translate-pathname', 'tree-equal',
'truename', 'truncate', 'two-way-stream-input-stream',
'two-way-stream-output-stream', 'type-error-datum',
'type-error-expected-type', 'type-of', 'typep', 'unbound-slot-instance',
'unexport', 'unintern', 'union', 'unread-char', 'unuse-package',
'update-instance-for-different-class',
'update-instance-for-redefined-class', 'upgraded-array-element-type',
'upgraded-complex-part-type', 'upper-case-p', 'use-package',
'user-homedir-pathname', 'use-value', 'values', 'values-list', 'vector',
'vectorp', 'vector-pop', 'vector-push', 'vector-push-extend', 'warn',
'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line',
'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p',
'y-or-n-p', 'zerop',
]
SPECIAL_FORMS = [
'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if',
'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet',
'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote',
'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw',
'unwind-protect',
]
MACROS = [
'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond',
'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric',
'define-compiler-macro', 'define-condition', 'define-method-combination',
'define-modify-macro', 'define-setf-expander', 'define-symbol-macro',
'defmacro', 'defmethod', 'defpackage', 'defparameter', 'defsetf',
'defstruct', 'deftype', 'defun', 'defvar', 'destructuring-bind', 'do',
'do*', 'do-all-symbols', 'do-external-symbols', 'dolist', 'do-symbols',
'dotimes', 'ecase', 'etypecase', 'formatter', 'handler-bind',
'handler-case', 'ignore-errors', 'incf', 'in-package', 'lambda', 'loop',
'loop-finish', 'make-method', 'multiple-value-bind', 'multiple-value-list',
'multiple-value-setq', 'nth-value', 'or', 'pop',
'pprint-exit-if-list-exhausted', 'pprint-logical-block', 'pprint-pop',
'print-unreadable-object', 'prog', 'prog*', 'prog1', 'prog2', 'psetf',
'psetq', 'push', 'pushnew', 'remf', 'restart-bind', 'restart-case',
'return', 'rotatef', 'setf', 'shiftf', 'step', 'time', 'trace', 'typecase',
'unless', 'untrace', 'when', 'with-accessors', 'with-compilation-unit',
'with-condition-restarts', 'with-hash-table-iterator',
'with-input-from-string', 'with-open-file', 'with-open-stream',
'with-output-to-string', 'with-package-iterator', 'with-simple-restart',
'with-slots', 'with-standard-io-syntax',
]
LAMBDA_LIST_KEYWORDS = [
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
'&rest', '&whole',
]
DECLARATIONS = [
'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special',
'ignorable', 'notinline', 'type',
]
BUILTIN_TYPES = [
'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit',
'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil',
'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float',
'simple-array', 'simple-base-string', 'simple-bit-vector', 'simple-string',
'simple-vector', 'standard-char', 'unsigned-byte',
# Condition Types
'arithmetic-error', 'cell-error', 'condition', 'control-error',
'division-by-zero', 'end-of-file', 'error', 'file-error',
'floating-point-inexact', 'floating-point-overflow',
'floating-point-underflow', 'floating-point-invalid-operation',
'parse-error', 'package-error', 'print-not-readable', 'program-error',
'reader-error', 'serious-condition', 'simple-condition', 'simple-error',
'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition',
'style-warning', 'type-error', 'unbound-variable', 'unbound-slot',
'undefined-function', 'warning',
]
BUILTIN_CLASSES = [
'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character',
'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream',
'file-stream', 'float', 'function', 'generic-function', 'hash-table',
'integer', 'list', 'logical-pathname', 'method-combination', 'method',
'null', 'number', 'package', 'pathname', 'ratio', 'rational', 'readtable',
'real', 'random-state', 'restart', 'sequence', 'standard-class',
'standard-generic-function', 'standard-method', 'standard-object',
'string-stream', 'stream', 'string', 'structure-class', 'structure-object',
'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector',
]
| gpl-2.0 | 3,619,368,868,533,806,600 | 59.409483 | 80 | 0.629825 | false |
Softmotions/edx-platform | lms/djangoapps/survey/models.py | 89 | 8631 | """
Models to support Course Surveys feature
"""
import logging
from lxml import etree
from collections import OrderedDict
from django.db import models
from student.models import User
from django.core.exceptions import ValidationError
from model_utils.models import TimeStampedModel
from survey.exceptions import SurveyFormNameAlreadyExists, SurveyFormNotFound
from xmodule_django.models import CourseKeyField
log = logging.getLogger("edx.survey")
class SurveyForm(TimeStampedModel):
"""
Model to define a Survey Form that contains the HTML form data
that is presented to the end user. A SurveyForm is not tied to
a particular run of a course, to allow for sharing of Surveys
across courses
"""
name = models.CharField(max_length=255, db_index=True, unique=True)
form = models.TextField()
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
"""
Override save method so we can validate that the form HTML is
actually parseable
"""
self.validate_form_html(self.form)
# now call the actual save method
super(SurveyForm, self).save(*args, **kwargs)
@classmethod
def validate_form_html(cls, html):
"""
Makes sure that the html that is contained in the form field is valid
"""
try:
fields = cls.get_field_names_from_html(html)
except Exception as ex:
log.exception("Cannot parse SurveyForm html: {}".format(ex))
raise ValidationError("Cannot parse SurveyForm as HTML: {}".format(ex))
if not len(fields):
raise ValidationError("SurveyForms must contain at least one form input field")
@classmethod
def create(cls, name, form, update_if_exists=False):
"""
Helper class method to create a new Survey Form.
update_if_exists=True means that if a form already exists with that name, then update it.
        Otherwise raise a SurveyFormNameAlreadyExists exception.
"""
survey = cls.get(name, throw_if_not_found=False)
if not survey:
survey = SurveyForm(name=name, form=form)
else:
if update_if_exists:
survey.form = form
else:
raise SurveyFormNameAlreadyExists()
survey.save()
return survey
@classmethod
def get(cls, name, throw_if_not_found=True):
"""
        Helper class method to look up a Survey Form. Raises SurveyFormNotFound if it does not exist
        in the database, unless throw_if_not_found=False, in which case None is returned.
"""
survey = None
exists = SurveyForm.objects.filter(name=name).exists()
if exists:
survey = SurveyForm.objects.get(name=name)
elif throw_if_not_found:
raise SurveyFormNotFound()
return survey
def get_answers(self, user=None, limit_num_users=10000):
"""
Returns all answers for all users for this Survey
"""
return SurveyAnswer.get_answers(self, user, limit_num_users=limit_num_users)
def has_user_answered_survey(self, user):
"""
Returns whether a given user has supplied answers to this
survey
"""
return SurveyAnswer.do_survey_answers_exist(self, user)
def save_user_answers(self, user, answers, course_key):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
        IMPORTANT: There is no validation of form answers at this point. All data
        supplied to this method is presumed to be previously validated.
"""
# first remove any answer the user might have done before
self.clear_user_answers(user)
SurveyAnswer.save_answers(self, user, answers, course_key)
def clear_user_answers(self, user):
"""
Removes all answers that a user has submitted
"""
SurveyAnswer.objects.filter(form=self, user=user).delete()
def get_field_names(self):
"""
        Returns a list of defined field names for all answers in a survey. This can be
        helpful for reporting-like features, e.g. adding headers to the reports.
This is taken from the set of <input> fields inside the form.
"""
return SurveyForm.get_field_names_from_html(self.form)
@classmethod
def get_field_names_from_html(cls, html):
"""
Returns a list of defined field names from a block of HTML
"""
names = []
# make sure the form is wrap in some outer single element
# otherwise lxml can't parse it
# NOTE: This wrapping doesn't change the ability to query it
tree = etree.fromstring(u'<div>{}</div>'.format(html))
input_fields = (
tree.findall('.//input') + tree.findall('.//select') +
tree.findall('.//textarea')
)
for input_field in input_fields:
if 'name' in input_field.keys() and input_field.attrib['name'] not in names:
names.append(input_field.attrib['name'])
return names
class SurveyAnswer(TimeStampedModel):
"""
Model for the answers that a user gives for a particular form in a course
"""
user = models.ForeignKey(User, db_index=True)
form = models.ForeignKey(SurveyForm, db_index=True)
field_name = models.CharField(max_length=255, db_index=True)
field_value = models.CharField(max_length=1024)
# adding the course_id where the end-user answered the survey question
# since it didn't exist in the beginning, it is nullable
course_key = CourseKeyField(max_length=255, db_index=True, null=True)
@classmethod
def do_survey_answers_exist(cls, form, user):
"""
Returns whether a user has any answers for a given SurveyForm for a course
This can be used to determine if a user has taken a CourseSurvey.
"""
return SurveyAnswer.objects.filter(form=form, user=user).exists()
@classmethod
def get_answers(cls, form, user=None, limit_num_users=10000):
"""
Returns all answers a user (or all users, when user=None) has given to an instance of a SurveyForm
        The return value is a nested dict of simple name/value pairs, keyed by
        user id. For example (where 'field3' is an optional field):
results = {
'1': {
'field1': 'value1',
'field2': 'value2',
},
'2': {
'field1': 'value3',
'field2': 'value4',
'field3': 'value5',
}
:
:
}
        limit_num_users is to prevent an unintentionally huge in-memory dictionary.
"""
if user:
answers = SurveyAnswer.objects.filter(form=form, user=user)
else:
answers = SurveyAnswer.objects.filter(form=form)
results = OrderedDict()
num_users = 0
for answer in answers:
user_id = answer.user.id
if user_id not in results and num_users < limit_num_users:
results[user_id] = OrderedDict()
num_users = num_users + 1
if user_id in results:
results[user_id][answer.field_name] = answer.field_value
return results
@classmethod
def save_answers(cls, form, user, answers, course_key):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
        IMPORTANT: There is no validation of form answers at this point. All data
        supplied to this method is presumed to be previously validated.
"""
for name in answers.keys():
value = answers[name]
# See if there is an answer stored for this user, form, field_name pair or not
# this will allow for update cases. This does include an additional lookup,
# but write operations will be relatively infrequent
defaults = {"field_value": value}
if course_key:
defaults['course_key'] = course_key
answer, created = SurveyAnswer.objects.get_or_create(
user=user,
form=form,
field_name=name,
defaults=defaults
)
if not created:
# Allow for update cases.
answer.field_value = value
answer.course_key = course_key
answer.save()
| agpl-3.0 | -1,312,584,718,714,951,400 | 32.714844 | 106 | 0.610358 | false |
ebrelsford/v2v | vacant_to_vibrant/steward/migrations/0007_auto__add_field_stewardproject_external_id.py | 1 | 10857 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'StewardProject.external_id'
db.add_column(u'steward_stewardproject', 'external_id',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'StewardProject.external_id'
db.delete_column(u'steward_stewardproject', 'external_id')
models = {
u'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': u"orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'lots.use': {
'Meta': {'object_name': 'Use'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'organize.organizertype': {
'Meta': {'object_name': 'OrganizerType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'phillyorganize.organizer': {
'Meta': {'object_name': 'Organizer'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'facebook_page': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'receive_text_messages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organize.OrganizerType']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'steward.optedinstewardprojectmanager': {
'Meta': {'object_name': 'OptedInStewardProjectManager'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'steward.stewardnotification': {
'Meta': {'object_name': 'StewardNotification'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'facebook_page': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_on_map': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'land_tenure_status': ('django.db.models.fields.CharField', [], {'default': "u'not sure'", 'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'support_organization': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organize.OrganizerType']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'use': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lots.Use']"})
},
u'steward.stewardproject': {
'Meta': {'object_name': 'StewardProject'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_on_map': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'land_tenure_status': ('django.db.models.fields.CharField', [], {'default': "u'not sure'", 'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'organizer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['phillyorganize.Organizer']", 'null': 'True', 'blank': 'True'}),
'support_organization': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'use': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lots.Use']"})
}
}
complete_apps = ['steward'] | gpl-3.0 | 5,375,956,182,319,262,000 | 76.557143 | 205 | 0.556231 | false |
win0x86/Lab | mitm/libmproxy/proxy.py | 1 | 24823 | import sys, os, string, socket, time
import shutil, tempfile, threading
import SocketServer
from OpenSSL import SSL
from netlib import odict, tcp, http, wsgi, certutils, http_status, http_auth
import utils, flow, version, platform, controller
KILL = 0
class ProxyError(Exception):
def __init__(self, code, msg, headers=None):
self.code, self.msg, self.headers = code, msg, headers
def __str__(self):
return "ProxyError(%s, %s)"%(self.code, self.msg)
class Log:
def __init__(self, msg):
self.msg = msg
class ProxyConfig:
def __init__(self, certfile = None, cacert = None, clientcerts = None, no_upstream_cert=False, body_size_limit = None, reverse_proxy=None, transparent_proxy=None, authenticator=None):
self.certfile = certfile
self.cacert = cacert
self.clientcerts = clientcerts
self.no_upstream_cert = no_upstream_cert
self.body_size_limit = body_size_limit
self.reverse_proxy = reverse_proxy
self.transparent_proxy = transparent_proxy
self.authenticator = authenticator
self.certstore = certutils.CertStore()
class ServerConnection(tcp.TCPClient):
def __init__(self, config, scheme, host, port, sni):
tcp.TCPClient.__init__(self, host, port)
self.config = config
self.scheme, self.sni = scheme, sni
self.requestcount = 0
self.tcp_setup_timestamp = None
self.ssl_setup_timestamp = None
def connect(self):
tcp.TCPClient.connect(self)
self.tcp_setup_timestamp = time.time()
if self.scheme == "https":
clientcert = None
if self.config.clientcerts:
path = os.path.join(self.config.clientcerts, self.host.encode("idna")) + ".pem"
if os.path.exists(path):
clientcert = path
try:
self.convert_to_ssl(cert=clientcert, sni=self.sni)
self.ssl_setup_timestamp = time.time()
except tcp.NetLibError, v:
raise ProxyError(400, str(v))
def send(self, request):
self.requestcount += 1
d = request._assemble()
if not d:
raise ProxyError(502, "Cannot transmit an incomplete request.")
self.wfile.write(d)
self.wfile.flush()
def terminate(self):
if self.connection:
try:
self.wfile.flush()
except tcp.NetLibDisconnect: # pragma: no cover
pass
self.connection.close()
class RequestReplayThread(threading.Thread):
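    """
    Replays a captured request against the upstream server in a background
    thread and reports the response (or error) back over the master channel.
    """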
def __init__(self, config, flow, masterq):
self.config, self.flow, self.channel = config, flow, controller.Channel(masterq)
threading.Thread.__init__(self)
def run(self):
try:
r = self.flow.request
server = ServerConnection(self.config, r.scheme, r.host, r.port, r.host)
server.connect()
server.send(r)
tsstart = utils.timestamp()
httpversion, code, msg, headers, content = http.read_response(
server.rfile, r.method, self.config.body_size_limit
)
response = flow.Response(
self.flow.request, httpversion, code, msg, headers, content, server.cert,
server.rfile.first_byte_timestamp
)
self.channel.ask(response)
except (ProxyError, http.HttpError, tcp.NetLibError), v:
err = flow.Error(self.flow.request, str(v))
self.channel.ask(err)
class HandleSNI:
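    """
    SNI callback for pyOpenSSL: when the client sends a servername, establish
    the upstream connection for that host and install an SSL context carrying
    the interception certificate and key.
    """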
def __init__(self, handler, client_conn, host, port, cert, key):
self.handler, self.client_conn, self.host, self.port = handler, client_conn, host, port
self.cert, self.key = cert, key
def __call__(self, connection):
try:
sn = connection.get_servername()
if sn:
self.handler.get_server_connection(self.client_conn, "https", self.host, self.port, sn)
new_context = SSL.Context(SSL.TLSv1_METHOD)
new_context.use_privatekey_file(self.key)
new_context.use_certificate(self.cert.x509)
connection.set_context(new_context)
self.handler.sni = sn.decode("utf8").encode("idna")
# An unhandled exception in this method will core dump PyOpenSSL, so
# make dang sure it doesn't happen.
except Exception, e: # pragma: no cover
pass
class ProxyHandler(tcp.BaseHandler):
def __init__(self, config, connection, client_address, server, channel, server_version):
self.channel, self.server_version = channel, server_version
self.config = config
self.proxy_connect_state = None
self.sni = None
self.server_conn = None
tcp.BaseHandler.__init__(self, connection, client_address, server)
def get_server_connection(self, cc, scheme, host, port, sni):
"""
When SNI is in play, this means we have an SSL-encrypted
connection, which means that the entire handler is dedicated to a
single server connection - no multiplexing. If this assumption ever
breaks, we'll have to do something different with the SNI host
variable on the handler object.
"""
sc = self.server_conn
if not sni:
sni = host
if sc and (scheme, host, port, sni) != (sc.scheme, sc.host, sc.port, sc.sni):
sc.terminate()
self.server_conn = None
self.log(
cc,
"switching connection", [
"%s://%s:%s (sni=%s) -> %s://%s:%s (sni=%s)"%(
scheme, host, port, sni,
sc.scheme, sc.host, sc.port, sc.sni
)
]
)
if not self.server_conn:
try:
self.server_conn = ServerConnection(self.config, scheme, host, port, sni)
self.server_conn.connect()
except tcp.NetLibError, v:
raise ProxyError(502, v)
return self.server_conn
def del_server_connection(self):
if self.server_conn:
self.server_conn.terminate()
self.server_conn = None
def handle(self):
cc = flow.ClientConnect(self.client_address)
self.log(cc, "connect")
self.channel.ask(cc)
while self.handle_request(cc) and not cc.close:
pass
cc.close = True
self.del_server_connection()
cd = flow.ClientDisconnect(cc)
self.log(
cc, "disconnect",
[
"handled %s requests"%cc.requestcount]
)
self.channel.tell(cd)
def handle_request(self, cc):
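        """
        Read one request from the client, serve it from a registered app if one
        matches, otherwise pass it through the flow master and relay it to the
        upstream server. Returns True if the client connection can be reused.
        """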
try:
request, err = None, None
request = self.read_request(cc)
if request is None:
return
cc.requestcount += 1
app = self.server.apps.get(request)
if app:
err = app.serve(request, self.wfile)
if err:
self.log(cc, "Error in wsgi app.", err.split("\n"))
return
else:
request_reply = self.channel.ask(request)
if request_reply is None or request_reply == KILL:
return
elif isinstance(request_reply, flow.Response):
request = False
response = request_reply
response_reply = self.channel.ask(response)
else:
request = request_reply
if self.config.reverse_proxy:
scheme, host, port = self.config.reverse_proxy
else:
scheme, host, port = request.scheme, request.host, request.port
# If we've already pumped a request over this connection,
# it's possible that the server has timed out. If this is
# the case, we want to reconnect without sending an error
# to the client.
while 1:
sc = self.get_server_connection(cc, scheme, host, port, self.sni)
sc.send(request)
if sc.requestcount == 1: # add timestamps only for first request (others are not directly affected)
request.tcp_setup_timestamp = sc.tcp_setup_timestamp
request.ssl_setup_timestamp = sc.ssl_setup_timestamp
sc.rfile.reset_timestamps()
try:
tsstart = utils.timestamp()
httpversion, code, msg, headers, content = http.read_response(
sc.rfile,
request.method,
self.config.body_size_limit
)
except http.HttpErrorConnClosed, v:
self.del_server_connection()
if sc.requestcount > 1:
continue
else:
raise
except http.HttpError, v:
raise ProxyError(502, "Invalid server response.")
else:
break
response = flow.Response(
request, httpversion, code, msg, headers, content, sc.cert,
sc.rfile.first_byte_timestamp
)
response_reply = self.channel.ask(response)
# Not replying to the server invalidates the server
# connection, so we terminate.
if response_reply == KILL:
sc.terminate()
if response_reply == KILL:
return
else:
response = response_reply
self.send_response(response)
if request and http.request_connection_close(request.httpversion, request.headers):
return
# We could keep the client connection when the server
# connection needs to go away. However, we want to mimic
# behaviour as closely as possible to the client, so we
# disconnect.
if http.response_connection_close(response.httpversion, response.headers):
return
except (IOError, ProxyError, http.HttpError, tcp.NetLibError), e:
if hasattr(e, "code"):
cc.error = "%s: %s"%(e.code, e.msg)
else:
cc.error = str(e)
if request:
err = flow.Error(request, cc.error)
self.channel.ask(err)
self.log(
cc, cc.error,
["url: %s"%request.get_url()]
)
else:
self.log(cc, cc.error)
if isinstance(e, ProxyError):
self.send_error(e.code, e.msg, e.headers)
else:
return True
def log(self, cc, msg, subs=()):
msg = [
"%s:%s: "%cc.address + msg
]
for i in subs:
msg.append(" -> "+i)
msg = "\n".join(msg)
l = Log(msg)
self.channel.tell(l)
def find_cert(self, cc, host, port, sni):
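        """
        Return the certificate to present to the client: the user-supplied
        certfile if configured, otherwise a dummy certificate generated from the
        upstream server's CN and subjectAltNames.
        """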
if self.config.certfile:
return certutils.SSLCert.from_pem(file(self.config.certfile, "r").read())
else:
sans = []
if not self.config.no_upstream_cert:
conn = self.get_server_connection(cc, "https", host, port, sni)
sans = conn.cert.altnames
host = conn.cert.cn.decode("utf8").encode("idna")
ret = self.config.certstore.get_cert(host, sans, self.config.cacert)
if not ret:
raise ProxyError(502, "Unable to generate dummy cert.")
return ret
def get_line(self, fp):
"""
Get a line, possibly preceded by a blank.
"""
line = fp.readline()
if line == "\r\n" or line == "\n": # Possible leftover from previous message
line = fp.readline()
return line
def read_request_transparent(self, client_conn):
orig = self.config.transparent_proxy["resolver"].original_addr(self.connection)
if not orig:
raise ProxyError(502, "Transparent mode failure: could not resolve original destination.")
self.log(client_conn, "transparent to %s:%s"%orig)
host, port = orig
if port in self.config.transparent_proxy["sslports"]:
scheme = "https"
if not self.ssl_established:
dummycert = self.find_cert(client_conn, host, port, host)
sni = HandleSNI(
self, client_conn, host, port,
dummycert, self.config.certfile or self.config.cacert
)
try:
self.convert_to_ssl(dummycert, self.config.certfile or self.config.cacert, handle_sni=sni)
except tcp.NetLibError, v:
raise ProxyError(400, str(v))
else:
scheme = "http"
line = self.get_line(self.rfile)
if line == "":
return None
r = http.parse_init_http(line)
if not r:
raise ProxyError(400, "Bad HTTP request line: %s"%repr(line))
method, path, httpversion = r
headers = self.read_headers(authenticate=False)
content = http.read_http_body_request(
self.rfile, self.wfile, headers, httpversion, self.config.body_size_limit
)
return flow.Request(
client_conn,httpversion, host, port, scheme, method, path, headers, content,
self.rfile.first_byte_timestamp, utils.timestamp()
)
def read_request_proxy(self, client_conn):
line = self.get_line(self.rfile)
if line == "":
return None
if not self.proxy_connect_state:
connparts = http.parse_init_connect(line)
if connparts:
host, port, httpversion = connparts
headers = self.read_headers(authenticate=True)
self.wfile.write(
'HTTP/1.1 200 Connection established\r\n' +
('Proxy-agent: %s\r\n'%self.server_version) +
'\r\n'
)
self.wfile.flush()
dummycert = self.find_cert(client_conn, host, port, host)
sni = HandleSNI(
self, client_conn, host, port,
dummycert, self.config.certfile or self.config.cacert
)
try:
self.convert_to_ssl(dummycert, self.config.certfile or self.config.cacert, handle_sni=sni)
except tcp.NetLibError, v:
raise ProxyError(400, str(v))
self.proxy_connect_state = (host, port, httpversion)
line = self.rfile.readline(line)
if self.proxy_connect_state:
r = http.parse_init_http(line)
if not r:
raise ProxyError(400, "Bad HTTP request line: %s"%repr(line))
method, path, httpversion = r
headers = self.read_headers(authenticate=False)
host, port, _ = self.proxy_connect_state
content = http.read_http_body_request(
self.rfile, self.wfile, headers, httpversion, self.config.body_size_limit
)
return flow.Request(
client_conn, httpversion, host, port, "https", method, path, headers, content,
self.rfile.first_byte_timestamp, utils.timestamp()
)
else:
r = http.parse_init_proxy(line)
if not r:
raise ProxyError(400, "Bad HTTP request line: %s"%repr(line))
method, scheme, host, port, path, httpversion = r
headers = self.read_headers(authenticate=True)
content = http.read_http_body_request(
self.rfile, self.wfile, headers, httpversion, self.config.body_size_limit
)
return flow.Request(
client_conn, httpversion, host, port, scheme, method, path, headers, content,
self.rfile.first_byte_timestamp, utils.timestamp()
)
def read_request_reverse(self, client_conn):
line = self.get_line(self.rfile)
if line == "":
return None
scheme, host, port = self.config.reverse_proxy
r = http.parse_init_http(line)
if not r:
raise ProxyError(400, "Bad HTTP request line: %s"%repr(line))
method, path, httpversion = r
headers = self.read_headers(authenticate=False)
content = http.read_http_body_request(
self.rfile, self.wfile, headers, httpversion, self.config.body_size_limit
)
return flow.Request(
client_conn, httpversion, host, port, "http", method, path, headers, content,
self.rfile.first_byte_timestamp, utils.timestamp()
)
def read_request(self, client_conn):
self.rfile.reset_timestamps()
if self.config.transparent_proxy:
return self.read_request_transparent(client_conn)
elif self.config.reverse_proxy:
return self.read_request_reverse(client_conn)
else:
return self.read_request_proxy(client_conn)
def read_headers(self, authenticate=False):
headers = http.read_headers(self.rfile)
if headers is None:
raise ProxyError(400, "Invalid headers")
if authenticate and self.config.authenticator:
if self.config.authenticator.authenticate(headers):
self.config.authenticator.clean(headers)
else:
raise ProxyError(
407,
"Proxy Authentication Required",
self.config.authenticator.auth_challenge_headers()
)
return headers
def send_response(self, response):
d = response._assemble()
if not d:
raise ProxyError(502, "Cannot transmit an incomplete response.")
self.wfile.write(d)
self.wfile.flush()
def send_error(self, code, body, headers):
try:
response = http_status.RESPONSES.get(code, "Unknown")
html_content = '<html><head>\n<title>%d %s</title>\n</head>\n<body>\n%s\n</body>\n</html>'%(code, response, body)
self.wfile.write("HTTP/1.1 %s %s\r\n" % (code, response))
self.wfile.write("Server: %s\r\n"%self.server_version)
self.wfile.write("Content-type: text/html\r\n")
self.wfile.write("Content-Length: %d\r\n"%len(html_content))
for key, value in headers.items():
self.wfile.write("%s: %s\r\n"%(key, value))
self.wfile.write("Connection: close\r\n")
self.wfile.write("\r\n")
self.wfile.write(html_content)
self.wfile.flush()
except:
pass
class ProxyServerError(Exception): pass
class ProxyServer(tcp.TCPServer):
allow_reuse_address = True
bound = True
def __init__(self, config, port, address='', server_version=version.NAMEVERSION):
"""
Raises ProxyServerError if there's a startup problem.
"""
self.config, self.port, self.address = config, port, address
self.server_version = server_version
try:
tcp.TCPServer.__init__(self, (address, port))
except socket.error, v:
raise ProxyServerError('Error starting proxy server: ' + v.strerror)
self.channel = None
self.apps = AppRegistry()
def start_slave(self, klass, channel):
slave = klass(channel, self)
slave.start()
def set_channel(self, channel):
self.channel = channel
def handle_connection(self, request, client_address):
h = ProxyHandler(self.config, request, client_address, self, self.channel, self.server_version)
h.handle()
h.finish()
class AppRegistry:
def __init__(self):
self.apps = {}
def add(self, app, domain, port):
"""
Add a WSGI app to the registry, to be served for requests to the
specified domain, on the specified port.
"""
self.apps[(domain, port)] = wsgi.WSGIAdaptor(app, domain, port, version.NAMEVERSION)
def get(self, request):
"""
        Returns a WSGIAdaptor instance if the request matches an app, or None.
"""
if (request.host, request.port) in self.apps:
return self.apps[(request.host, request.port)]
if "host" in request.headers:
host = request.headers["host"][0]
return self.apps.get((host, request.port), None)
class DummyServer:
bound = False
def __init__(self, config):
self.config = config
def start_slave(self, *args):
pass
def shutdown(self):
pass
# Command-line utils
def certificate_option_group(parser):
group = parser.add_argument_group("SSL")
group.add_argument(
"--cert", action="store",
type = str, dest="cert", default=None,
help = "User-created SSL certificate file."
)
group.add_argument(
"--client-certs", action="store",
type = str, dest = "clientcerts", default=None,
help = "Client certificate directory."
)
TRANSPARENT_SSL_PORTS = [443, 8443]
def process_proxy_options(parser, options):
if options.cert:
options.cert = os.path.expanduser(options.cert)
if not os.path.exists(options.cert):
return parser.error("Manually created certificate does not exist: %s"%options.cert)
cacert = os.path.join(options.confdir, "mitmproxy-ca.pem")
cacert = os.path.expanduser(cacert)
if not os.path.exists(cacert):
certutils.dummy_ca(cacert)
body_size_limit = utils.parse_size(options.body_size_limit)
if options.reverse_proxy and options.transparent_proxy:
return parser.error("Can't set both reverse proxy and transparent proxy.")
if options.transparent_proxy:
if not platform.resolver:
return parser.error("Transparent mode not supported on this platform.")
trans = dict(
resolver = platform.resolver(),
sslports = TRANSPARENT_SSL_PORTS
)
else:
trans = None
if options.reverse_proxy:
rp = utils.parse_proxy_spec(options.reverse_proxy)
if not rp:
return parser.error("Invalid reverse proxy specification: %s"%options.reverse_proxy)
else:
rp = None
if options.clientcerts:
options.clientcerts = os.path.expanduser(options.clientcerts)
if not os.path.exists(options.clientcerts) or not os.path.isdir(options.clientcerts):
return parser.error("Client certificate directory does not exist or is not a directory: %s"%options.clientcerts)
if (options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd):
if options.auth_singleuser:
if len(options.auth_singleuser.split(':')) != 2:
return parser.error("Invalid single-user specification. Please use the format username:password")
username, password = options.auth_singleuser.split(':')
password_manager = http_auth.PassManSingleUser(username, password)
elif options.auth_nonanonymous:
password_manager = http_auth.PassManNonAnon()
elif options.auth_htpasswd:
try:
password_manager = http_auth.PassManHtpasswd(options.auth_htpasswd)
except ValueError, v:
return parser.error(v.message)
authenticator = http_auth.BasicProxyAuth(password_manager, "mitmproxy")
else:
authenticator = http_auth.NullProxyAuth(None)
return ProxyConfig(
certfile = options.cert,
cacert = cacert,
clientcerts = options.clientcerts,
body_size_limit = body_size_limit,
no_upstream_cert = options.no_upstream_cert,
reverse_proxy = rp,
transparent_proxy = trans,
authenticator = authenticator
)
| gpl-3.0 | -6,472,913,926,707,689,000 | 38.52707 | 187 | 0.551303 | false |
yesbox/ansible | lib/ansible/cli/playbook.py | 21 | 9325 | #!/usr/bin/env python
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
########################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import stat
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
from ansible.utils.vars import load_extra_vars
from ansible.utils.vars import load_options_vars
from ansible.vars import VariableManager
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
#---------------------------------------------------------------------------------------------------
class PlaybookCLI(CLI):
''' code behind ansible playbook cli'''
def parse(self):
# create parser for CLI options
parser = CLI.base_parser(
usage = "%prog playbook.yml",
connect_opts=True,
meta_opts=True,
runas_opts=True,
subset_opts=True,
check_opts=True,
inventory_opts=True,
runtask_opts=True,
vault_opts=True,
fork_opts=True,
module_opts=True,
)
# ansible playbook specific opts
parser.add_option('--list-tasks', dest='listtasks', action='store_true',
help="list all tasks that would be executed")
parser.add_option('--list-tags', dest='listtags', action='store_true',
help="list all available tags")
parser.add_option('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
parser.add_option('--start-at-task', dest='start_at_task',
help="start the playbook at the task matching this name")
self.options, self.args = parser.parse_args(self.args[1:])
self.parser = parser
if len(self.args) == 0:
raise AnsibleOptionsError("You must specify a playbook file to run")
display.verbosity = self.options.verbosity
self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
def run(self):
super(PlaybookCLI, self).run()
# Manage passwords
sshpass = None
becomepass = None
vault_pass = None
passwords = {}
# don't deal with privilege escalation or passwords when we don't need to
if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
self.normalize_become_options()
(sshpass, becomepass) = self.ask_passwords()
passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
loader = DataLoader()
if self.options.vault_password_file:
# read vault_pass from a file
vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
loader.set_vault_password(vault_pass)
elif self.options.ask_vault_pass:
vault_pass = self.ask_vault_passwords()[0]
loader.set_vault_password(vault_pass)
# initial error check, to make sure all specified playbooks are accessible
# before we start running anything through the playbook executor
for playbook in self.args:
if not os.path.exists(playbook):
raise AnsibleError("the playbook: %s could not be found" % playbook)
if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager()
variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
variable_manager.options_vars = load_options_vars(self.options)
# create the inventory, and filter it based on the subset specified (if any)
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
variable_manager.set_inventory(inventory)
        # Note: slightly wrong, this is written so that implicit localhost
        # (which is not returned in list_hosts()) is taken into account for
# warning if inventory is empty. But it can't be taken into account for
# checking if limit doesn't match any hosts. Instead we don't worry about
# limit if only implicit localhost was in inventory to start with.
#
# Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
display.warning("provided hosts list is empty, only localhost is available")
no_hosts = True
inventory.subset(self.options.subset)
if len(inventory.list_hosts()) == 0 and no_hosts is False:
# Invalid limit
raise AnsibleError("Specified --limit does not match any hosts")
# create the playbook executor, which manages running the plays via a task queue manager
pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options, passwords=passwords)
results = pbex.run()
if isinstance(results, list):
for p in results:
display.display('\nplaybook: %s' % p['playbook'])
for idx, play in enumerate(p['plays']):
msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
mytags = set(play.tags)
msg += '\tTAGS: [%s]' % (','.join(mytags))
if self.options.listhosts:
playhosts = set(inventory.get_hosts(play.hosts))
msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
for host in playhosts:
msg += "\n %s" % host
display.display(msg)
all_tags = set()
if self.options.listtags or self.options.listtasks:
taskmsg = ''
if self.options.listtasks:
taskmsg = ' tasks:\n'
def _process_block(b):
taskmsg = ''
for task in b.block:
if isinstance(task, Block):
taskmsg += _process_block(task)
else:
if task.action == 'meta':
continue
all_tags.update(task.tags)
if self.options.listtasks:
cur_tags = list(mytags.union(set(task.tags)))
cur_tags.sort()
if task.name:
taskmsg += " %s" % task.get_name()
else:
taskmsg += " %s" % task.action
taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
return taskmsg
all_vars = variable_manager.get_vars(loader=loader, play=play)
play_context = PlayContext(play=play, options=self.options)
for block in play.compile():
block = block.filter_tagged_tasks(play_context, all_vars)
if not block.has_tasks():
continue
taskmsg += _process_block(block)
if self.options.listtags:
cur_tags = list(mytags.union(all_tags))
cur_tags.sort()
taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
display.display(taskmsg)
return 0
else:
return results
| gpl-3.0 | -8,271,853,953,624,678,000 | 41.97235 | 166 | 0.560107 | false |
dimagi/rapidsms-contrib-apps-dev | handlers/tests.py | 1 | 1655 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from nose.tools import assert_equal
from rapidsms.conf import settings
from .utils import get_handlers
def test_get_handlers():
# store current settings.
_settings = (
settings.INSTALLED_APPS,
settings.INSTALLED_HANDLERS,
settings.EXCLUDED_HANDLERS)
# clear current settings, to test in a predictable environment.
settings.INSTALLED_APPS = []
settings.INSTALLED_HANDLERS = None
settings.EXCLUDED_HANDLERS = None
assert_equal(get_handlers(), [])
# this crappy test depends upon the ``echo`` contrib app, which
# defines exactly two handlers. i don't have a cleaner solution.
settings.INSTALLED_APPS = ['rapidsms.contrib.echo']
from rapidsms.contrib.echo.handlers.echo import EchoHandler
from rapidsms.contrib.echo.handlers.ping import PingHandler
# check that both handlers were found as default
assert_equal(get_handlers(), [EchoHandler, PingHandler])
# exclude no handlers, explicitly include a single handler
settings.INSTALLED_HANDLERS = ['rapidsms.contrib.echo.handlers.ping']
assert_equal(get_handlers(), [PingHandler])
settings.INSTALLED_HANDLERS = []
# exclude a single handler
settings.EXCLUDED_HANDLERS = ['rapidsms.contrib.echo.handlers.ping']
assert_equal(get_handlers(), [EchoHandler])
# exclude all handlers from the echo app
settings.EXCLUDED_HANDLERS = ['rapidsms.contrib.echo']
assert_equal(get_handlers(), [])
# restore pre-test settings.
settings.INSTALLED_APPS,
settings.INSTALLED_HANDLERS,
settings.EXCLUDED_HANDLERS = _settings
| bsd-3-clause | -7,531,617,202,543,421,000 | 31.45098 | 73 | 0.711178 | false |
martatolos/DemandAnalysis | visualizations.py | 1 | 5518 | # Plots
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import re
import random
def plot_several_countries(df, ylabel, title, country_list="", save=False, num="", xticks_hourly=False, kind='bar', linestyle='-', color='mbygcr', marker='o', linewidth=4.0, fontsize=16, legend=True):
"""
This function plots a dataframe with several countries
@param df: data frame
@param ylabel: label for y axis
@param title: graphic title
@param kind: graphic type ex: bar or line
@param linestyle: lines style
@param color: color to use
@param marker: shape of point on a line
@param linewidth: line width
@param fontsize: font size
@return: n/a
"""
# Plotting
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 12}
matplotlib.rc('font', **font)
if xticks_hourly:
xticks_hourly = range(0,24)
else:
xticks_hourly = None
### PLOT FINAL
if kind == 'line':
graphic = df.plot(title=title, kind=kind, fontsize=fontsize, linestyle=linestyle, color=color,
linewidth=linewidth, marker=marker, xticks=xticks_hourly, figsize=(18,9))
else:
graphic = df.plot(title=title, kind=kind, fontsize=fontsize, color=color,
xticks=xticks_hourly, figsize=(18,9))
if legend == False:
graphic.legend_.remove()
graphic.set_ylabel(ylabel)
graphic.legend(prop={'size': 12})
if save==True and country_list!="":
namefile= re.sub("[\'\",\[\]]", "", str(country_list))
namefile= re.sub("[\s+]", "-", namefile)
if num=="":
num = random.randrange(1,100)
plt.savefig(namefile+str(num))
else:
plt.show()
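# Example usage (a sketch; assumes a pandas DataFrame indexed by month with one
# column per country -- pandas is pulled in implicitly via DataFrame.plot):
#   import pandas as pd
#   df = pd.DataFrame({'ES': [20.1, 18.9, 19.4], 'DE': [45.2, 41.7, 43.0]},
#                     index=['Jan', 'Feb', 'Mar'])
#   plot_several_countries(df, ylabel='GWh', title='Monthly consumption',
#                          country_list=['ES', 'DE'], kind='line', save=False)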
def plot_yearly_consumption(df, country, kind='bar', linestyle='-', color='blue', marker='o', linewidth=4.0,fontsize=16):
"""
This function plots the yearly data from a monthlypowerconsumptions data frame
@param df: monthlypowerconsumptions data frame
    @param country: country name to add on the title of the plot
@return: n/a
"""
# Plotting
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 12}
matplotlib.rc('font', **font)
### PLOT FINAL
if kind == 'line':
graphic = df.plot(x='year', y='Sum', title='Evolution of electricity consumption in '+ country, kind=kind, fontsize=fontsize, linestyle=linestyle, color=color , marker=marker)
else:
graphic = df.plot(x='year', y='Sum', title='Evolution of electricity consumption in '+ country, kind=kind, fontsize=fontsize, color=color)
graphic.set_ylabel('GWh')
plt.show()
def plot_monthly_average_consumption(mpc, country_list, ylabel='normalized', title='', kind='bar', linestyle='-', color='mbygcr', marker='o', linewidth=4.0, fontsize=16, legend=True):
"""
    This function plots the monthly average consumption from a monthlypowerconsumptions object
    @param mpc: monthlypowerconsumptions object
@param country_list: country names to add on the title of the plot
@param ylabel: label for y axis
@param title: graphic title
@param kind: graphic type ex: bar or line
@param linestyle: lines style
@param color: color to use
@param marker: shape of point on a line
@param linewidth: line width
@param fontsize: font size
@return: n/a
"""
# Plotting
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 12}
matplotlib.rc('font', **font)
df = mpc.data_normalization(year=False)
df = df.groupby('country').mean()
del df['year']
del df['Sum']
df = df.T
plot_several_countries(df[country_list], ylabel, title, kind=kind, linestyle=linestyle, color=color, marker=marker, linewidth=linewidth, fontsize=fontsize, legend=legend)
def plot_average_week(df, ylabel='Normalized', title="Normalized average weekday consumption",kind='bar', color='rbbbbgg', rotation=50, legend=True):
# Plotting
"""
@param df: Data frame with the values to plot
@param ylabel: Label for the y axis
@param title: Title for the graphic
@param kind: Type of graphic: bar, line,...
@param color: color values
@param rotation: degrees for the ylabel rotation
@param legend: True or False legend on or off
"""
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 12}
matplotlib.rc('font', **font)
#create a dictionary for the week days
dayDict={0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thrusday', 4:'Friday', 5:'Saturday', 6:'Sunday'}
df = df[['Country', 'weekday', 'daily']]
df = df.pivot(index='weekday', columns='Country')
df = df.rename(index=dayDict)
df.columns = df.columns.droplevel()
# normalized
df = df/df.mean()
graphic = df.plot(title=title, kind=kind, color=color, legend=legend)
graphic.set_ylabel(ylabel)
graphic.legend(prop={'size': 12})
plt.xticks(rotation=rotation)
plt.show()
# #### PLOT FINAL
# # Plot the infaltion with the spanish consumption
# ES_info_year = ES_info[['year','Sum','inflation']]
# ES_info_year.set_index('year')
# plt.figure()
# ax = ES_info_year.plot(x='year', title='Consumption and Inflation in Spain', y='Sum', kind='bar',fontsize=16)
# ax.set_ylabel('Consumption - GWh')
# ax2 = ax.twinx()
# ax2.plot(ES_info_year['inflation'].values, linestyle='-', color='red', marker='o', linewidth=4.0)
# ax2.set_ylabel('Inflation - Annual Change [%]')
# plt.show() | gpl-2.0 | 8,165,022,937,362,936,000 | 33.49375 | 200 | 0.631026 | false |
MrSenko/Nitrate | tcms/testruns/urls/run_urls.py | 1 | 1045 | # -*- coding: utf-8 -*-
from django.conf.urls import url, patterns
from tcms.testruns.views import TestRunReportView
from tcms.testruns.views import AddCasesToRunView
urlpatterns = patterns(
'tcms.testruns.views',
url(r'^new/$', 'new'),
url(r'^(?P<run_id>\d+)/$', 'get'),
url(r'^(?P<run_id>\d+)/clone/$', 'new_run_with_caseruns'),
url(r'^(?P<run_id>\d+)/delete/$', 'delete'),
url(r'^(?P<run_id>\d+)/edit/$', 'edit'),
url(r'^(?P<run_id>\d+)/report/$', TestRunReportView.as_view(),
name='run-report'),
url(r'^(?P<run_id>\d+)/ordercase/$', 'order_case'),
url(r'^(?P<run_id>\d+)/changestatus/$', 'change_status'),
url(r'^(?P<run_id>\d+)/ordercaserun/$', 'order_case'),
url(r'^(?P<run_id>\d+)/removecaserun/$', 'remove_case_run'),
url(r'^(?P<run_id>\d+)/assigncase/$', AddCasesToRunView.as_view(),
name='add-cases-to-run'),
url(r'^(?P<run_id>\d+)/cc/$', 'cc'),
url(r'^(?P<run_id>\d+)/update/$', 'update_case_run_text'),
url(r'^(?P<run_id>\d+)/export/$', 'export'),
)
| gpl-2.0 | -8,638,650,171,695,771,000 | 33.833333 | 70 | 0.562679 | false |
Shaps/ansible | test/units/module_utils/common/test_collections.py | 39 | 5024 | # -*- coding: utf-8 -*-
# Copyright (c) 2018–2019, Sviatoslav Sydorenko <[email protected]>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
"""Test low-level utility functions from ``module_utils.common.collections``."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.six import Iterator
from ansible.module_utils.common._collections_compat import Sequence
from ansible.module_utils.common.collections import ImmutableDict, is_iterable, is_sequence
class SeqStub:
"""Stub emulating a sequence type.
>>> from collections.abc import Sequence
>>> assert issubclass(SeqStub, Sequence)
>>> assert isinstance(SeqStub(), Sequence)
"""
Sequence.register(SeqStub)
class IteratorStub(Iterator):
def __next__(self):
raise StopIteration
class IterableStub:
def __iter__(self):
return IteratorStub()
TEST_STRINGS = u'he', u'Україна', u'Česká republika'
TEST_STRINGS = TEST_STRINGS + tuple(s.encode('utf-8') for s in TEST_STRINGS)
TEST_ITEMS_NON_SEQUENCES = (
{}, object(), frozenset(),
4, 0.,
) + TEST_STRINGS
TEST_ITEMS_SEQUENCES = (
[], (),
SeqStub(),
)
TEST_ITEMS_SEQUENCES = TEST_ITEMS_SEQUENCES + (
# Iterable effectively containing nested random data:
TEST_ITEMS_NON_SEQUENCES,
)
@pytest.mark.parametrize('sequence_input', TEST_ITEMS_SEQUENCES)
def test_sequence_positive(sequence_input):
"""Test that non-string item sequences are identified correctly."""
assert is_sequence(sequence_input)
assert is_sequence(sequence_input, include_strings=False)
@pytest.mark.parametrize('non_sequence_input', TEST_ITEMS_NON_SEQUENCES)
def test_sequence_negative(non_sequence_input):
"""Test that non-sequences are identified correctly."""
assert not is_sequence(non_sequence_input)
@pytest.mark.parametrize('string_input', TEST_STRINGS)
def test_sequence_string_types_with_strings(string_input):
"""Test that ``is_sequence`` can separate string and non-string."""
assert is_sequence(string_input, include_strings=True)
@pytest.mark.parametrize('string_input', TEST_STRINGS)
def test_sequence_string_types_without_strings(string_input):
"""Test that ``is_sequence`` can separate string and non-string."""
assert not is_sequence(string_input, include_strings=False)
@pytest.mark.parametrize(
'seq',
([], (), {}, set(), frozenset(), IterableStub()),
)
def test_iterable_positive(seq):
assert is_iterable(seq)
@pytest.mark.parametrize(
'seq', (IteratorStub(), object(), 5, 9.)
)
def test_iterable_negative(seq):
assert not is_iterable(seq)
@pytest.mark.parametrize('string_input', TEST_STRINGS)
def test_iterable_including_strings(string_input):
assert is_iterable(string_input, include_strings=True)
@pytest.mark.parametrize('string_input', TEST_STRINGS)
def test_iterable_excluding_strings(string_input):
assert not is_iterable(string_input, include_strings=False)
class TestImmutableDict:
def test_scalar(self):
imdict = ImmutableDict({1: 2})
assert imdict[1] == 2
def test_string(self):
imdict = ImmutableDict({u'café': u'くらとみ'})
assert imdict[u'café'] == u'くらとみ'
def test_container(self):
imdict = ImmutableDict({(1, 2): ['1', '2']})
assert imdict[(1, 2)] == ['1', '2']
def test_from_tuples(self):
imdict = ImmutableDict((('a', 1), ('b', 2)))
assert frozenset(imdict.items()) == frozenset((('a', 1), ('b', 2)))
def test_from_kwargs(self):
imdict = ImmutableDict(a=1, b=2)
assert frozenset(imdict.items()) == frozenset((('a', 1), ('b', 2)))
def test_immutable(self):
imdict = ImmutableDict({1: 2})
expected_reason = r"^'ImmutableDict' object does not support item assignment$"
with pytest.raises(TypeError, match=expected_reason):
imdict[1] = 3
with pytest.raises(TypeError, match=expected_reason):
imdict[5] = 3
def test_hashable(self):
# ImmutableDict is hashable when all of its values are hashable
imdict = ImmutableDict({u'café': u'くらとみ'})
assert hash(imdict)
def test_nonhashable(self):
# ImmutableDict is unhashable when one of its values is unhashable
imdict = ImmutableDict({u'café': u'くらとみ', 1: [1, 2]})
expected_reason = r"^unhashable type: 'list'$"
with pytest.raises(TypeError, match=expected_reason):
hash(imdict)
def test_len(self):
imdict = ImmutableDict({1: 2, 'a': 'b'})
assert len(imdict) == 2
def test_repr(self):
initial_data = {1: 2, 'a': 'b'}
initial_data_repr = repr(initial_data)
imdict = ImmutableDict(initial_data)
actual_repr = repr(imdict)
expected_repr = "ImmutableDict({0})".format(initial_data_repr)
assert actual_repr == expected_repr
| gpl-3.0 | -6,191,244,763,495,012,000 | 29.722222 | 106 | 0.667269 | false |
gqwest-erp/server | openerp/addons/purchase_analytic_plans/__init__.py | 441 | 1220 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import purchase_analytic_plans
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,668,753,712,663,796,000 | 42.571429 | 79 | 0.553279 | false |
mxia/engine | build/android/gyp/create_java_binary_script.py | 26 | 2379 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a simple script to run a java "binary".
This creates a script that sets up the java command line for running a java
jar. This includes correctly setting the classpath and the main class.
"""
import optparse
import os
import sys
from util import build_utils
# The java command must be executed in the current directory because there may
# be user-supplied paths in the args. The script receives the classpath relative
# to the directory that the script is written in and then, when run, must
# recalculate the paths relative to the current directory.
script_template = """\
#!/usr/bin/env python
#
# This file was generated by build/android/gyp/create_java_binary_script.py
import os
import sys
self_dir = os.path.dirname(__file__)
classpath = [{classpath}]
if os.getcwd() != self_dir:
offset = os.path.relpath(self_dir, os.getcwd())
classpath = [os.path.join(offset, p) for p in classpath]
java_args = [
"java",
"-classpath", ":".join(classpath),
"-enableassertions",
\"{main_class}\"] + sys.argv[1:]
os.execvp("java", java_args)
"""
def main(argv):
argv = build_utils.ExpandFileArgs(argv)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--output', help='Output path for executable script.')
parser.add_option('--jar-path', help='Path to the main jar.')
parser.add_option('--main-class',
help='Name of the java class with the "main" entry point.')
parser.add_option('--classpath', action='append',
help='Classpath for running the jar.')
options, _ = parser.parse_args(argv)
classpath = [options.jar_path]
for cp_arg in options.classpath:
classpath += build_utils.ParseGypList(cp_arg)
run_dir = os.path.dirname(options.output)
classpath = [os.path.relpath(p, run_dir) for p in classpath]
with open(options.output, 'w') as script:
script.write(script_template.format(
classpath=('"%s"' % '", "'.join(classpath)),
main_class=options.main_class))
os.chmod(options.output, 0750)
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
build_utils.GetPythonDependencies())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -5,235,848,678,749,257,000 | 29.896104 | 80 | 0.702816 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_macpath.py | 62 | 4985 | import macpath
from test import support, test_genericpath
import unittest
class MacPathTestCase(unittest.TestCase):
def test_abspath(self):
self.assertEqual(macpath.abspath("xx:yy"), "xx:yy")
def test_isabs(self):
isabs = macpath.isabs
self.assertTrue(isabs("xx:yy"))
self.assertTrue(isabs("xx:yy:"))
self.assertTrue(isabs("xx:"))
self.assertFalse(isabs("foo"))
self.assertFalse(isabs(":foo"))
self.assertFalse(isabs(":foo:bar"))
self.assertFalse(isabs(":foo:bar:"))
self.assertTrue(isabs(b"xx:yy"))
self.assertTrue(isabs(b"xx:yy:"))
self.assertTrue(isabs(b"xx:"))
self.assertFalse(isabs(b"foo"))
self.assertFalse(isabs(b":foo"))
self.assertFalse(isabs(b":foo:bar"))
self.assertFalse(isabs(b":foo:bar:"))
def test_split(self):
split = macpath.split
self.assertEqual(split("foo:bar"),
('foo:', 'bar'))
self.assertEqual(split("conky:mountpoint:foo:bar"),
('conky:mountpoint:foo', 'bar'))
self.assertEqual(split(":"), ('', ''))
self.assertEqual(split(":conky:mountpoint:"),
(':conky:mountpoint', ''))
self.assertEqual(split(b"foo:bar"),
(b'foo:', b'bar'))
self.assertEqual(split(b"conky:mountpoint:foo:bar"),
(b'conky:mountpoint:foo', b'bar'))
self.assertEqual(split(b":"), (b'', b''))
self.assertEqual(split(b":conky:mountpoint:"),
(b':conky:mountpoint', b''))
def test_join(self):
join = macpath.join
self.assertEqual(join('a', 'b'), ':a:b')
self.assertEqual(join('', 'a:b'), 'a:b')
self.assertEqual(join('a:b', 'c'), 'a:b:c')
self.assertEqual(join('a:b', ':c'), 'a:b:c')
self.assertEqual(join('a', ':b', ':c'), ':a:b:c')
self.assertEqual(join(b'a', b'b'), b':a:b')
self.assertEqual(join(b'', b'a:b'), b'a:b')
self.assertEqual(join(b'a:b', b'c'), b'a:b:c')
self.assertEqual(join(b'a:b', b':c'), b'a:b:c')
self.assertEqual(join(b'a', b':b', b':c'), b':a:b:c')
def test_splitext(self):
splitext = macpath.splitext
self.assertEqual(splitext(":foo.ext"), (':foo', '.ext'))
self.assertEqual(splitext("foo:foo.ext"), ('foo:foo', '.ext'))
self.assertEqual(splitext(".ext"), ('.ext', ''))
self.assertEqual(splitext("foo.ext:foo"), ('foo.ext:foo', ''))
self.assertEqual(splitext(":foo.ext:"), (':foo.ext:', ''))
self.assertEqual(splitext(""), ('', ''))
self.assertEqual(splitext("foo.bar.ext"), ('foo.bar', '.ext'))
self.assertEqual(splitext(b":foo.ext"), (b':foo', b'.ext'))
self.assertEqual(splitext(b"foo:foo.ext"), (b'foo:foo', b'.ext'))
self.assertEqual(splitext(b".ext"), (b'.ext', b''))
self.assertEqual(splitext(b"foo.ext:foo"), (b'foo.ext:foo', b''))
self.assertEqual(splitext(b":foo.ext:"), (b':foo.ext:', b''))
self.assertEqual(splitext(b""), (b'', b''))
self.assertEqual(splitext(b"foo.bar.ext"), (b'foo.bar', b'.ext'))
def test_ismount(self):
ismount = macpath.ismount
self.assertEqual(ismount("a:"), True)
self.assertEqual(ismount("a:b"), False)
self.assertEqual(ismount("a:b:"), True)
self.assertEqual(ismount(""), False)
self.assertEqual(ismount(":"), False)
self.assertEqual(ismount(b"a:"), True)
self.assertEqual(ismount(b"a:b"), False)
self.assertEqual(ismount(b"a:b:"), True)
self.assertEqual(ismount(b""), False)
self.assertEqual(ismount(b":"), False)
def test_normpath(self):
normpath = macpath.normpath
self.assertEqual(normpath("a:b"), "a:b")
self.assertEqual(normpath("a"), ":a")
self.assertEqual(normpath("a:b::c"), "a:c")
self.assertEqual(normpath("a:b:c:::d"), "a:d")
self.assertRaises(macpath.norm_error, normpath, "a::b")
self.assertRaises(macpath.norm_error, normpath, "a:b:::c")
self.assertEqual(normpath(":"), ":")
self.assertEqual(normpath("a:"), "a:")
self.assertEqual(normpath("a:b:"), "a:b")
self.assertEqual(normpath(b"a:b"), b"a:b")
self.assertEqual(normpath(b"a"), b":a")
self.assertEqual(normpath(b"a:b::c"), b"a:c")
self.assertEqual(normpath(b"a:b:c:::d"), b"a:d")
self.assertRaises(macpath.norm_error, normpath, b"a::b")
self.assertRaises(macpath.norm_error, normpath, b"a:b:::c")
self.assertEqual(normpath(b":"), b":")
self.assertEqual(normpath(b"a:"), b"a:")
self.assertEqual(normpath(b"a:b:"), b"a:b")
class MacCommonTest(test_genericpath.CommonTest):
pathmodule = macpath
def test_main():
support.run_unittest(MacPathTestCase, MacCommonTest)
if __name__ == "__main__":
test_main()
| mit | 3,482,159,813,867,012,600 | 38.251969 | 73 | 0.559478 | false |
apache/allura | ForgeSVN/forgesvn/model/svn.py | 2 | 31204 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import re
import os
import shutil
import string
import logging
import subprocess
import time
import operator as op
from subprocess import Popen, PIPE
from hashlib import sha1
from io import BytesIO
from datetime import datetime
import tempfile
from shutil import rmtree
import six
import tg
import pysvn
from paste.deploy.converters import asbool, asint
from pymongo.errors import DuplicateKeyError
from tg import tmpl_context as c, app_globals as g
from ming.base import Object
from ming.orm import Mapper, FieldProperty
from ming.utils import LazyProperty
from allura import model as M
from allura.lib import helpers as h
from allura.model.auth import User
from allura.model.repository import zipdir
from allura.model import repository as RM
from io import open
from six.moves import range
from six.moves import map
log = logging.getLogger(__name__)
class Repository(M.Repository):
tool_name = 'SVN'
repo_id = 'svn'
type_s = 'SVN Repository'
class __mongometa__:
name = str('svn-repository')
branches = FieldProperty([dict(name=str, object_id=str)])
_refresh_precompute = False
@LazyProperty
def _impl(self):
return SVNImplementation(self)
def latest(self, branch=None):
if self._impl is None:
return None
return self._impl.commit('HEAD')
def tarball_filename(self, revision, path=None):
fn = super(Repository, self).tarball_filename('r'+revision, path)
path = self._impl._tarball_path_clean(path, revision)
fn += ('-' + '-'.join(path.split('/'))) if path else ''
return fn
def rev_to_commit_id(self, rev):
return self._impl.rev_parse(rev)
class SVNCalledProcessError(Exception):
def __init__(self, cmd, returncode, stdout, stderr):
self.cmd = cmd
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return "Command: '%s' returned non-zero exit status %s\nSTDOUT: %s\nSTDERR: %s" % \
(self.cmd, self.returncode, self.stdout, self.stderr)
def svn_path_exists(path, rev=None):
svn = SVNLibWrapper(pysvn.Client())
if rev:
rev = pysvn.Revision(pysvn.opt_revision_kind.number, rev)
else:
rev = pysvn.Revision(pysvn.opt_revision_kind.head)
try:
svn.info2(path, revision=rev, recurse=False)
return True
except pysvn.ClientError:
return False
class SVNLibWrapper(object):
"""Wrapper around pysvn, used for instrumentation."""
def __init__(self, client):
self.client = client
def checkout(self, *args, **kw):
return self.client.checkout(*args, **kw)
def add(self, *args, **kw):
return self.client.add(*args, **kw)
def checkin(self, *args, **kw):
return self.client.checkin(*args, **kw)
def info2(self, *args, **kw):
return self.client.info2(*args, **kw)
def log(self, *args, **kw):
return self.client.log(*args, **kw)
def cat(self, *args, **kw):
return self.client.cat(*args, **kw)
def list(self, *args, **kw):
return self.client.list(*args, **kw)
def __getattr__(self, name):
return getattr(self.client, name)
class SVNImplementation(M.RepositoryImplementation):
post_receive_template = string.Template(
'#!/bin/bash\n'
'# The following is required for site integration, do not remove/modify.\n'
'# Place user hook code in post-commit-user and it will be called from here.\n'
'curl -s $url\n'
'\n'
'DIR="$$(dirname "$${BASH_SOURCE[0]}")"\n'
'if [ -x $$DIR/post-commit-user ]; then'
' exec $$DIR/post-commit-user "$$@"\n'
'fi')
def __init__(self, repo):
self._repo = repo
@LazyProperty
def _svn(self):
return SVNLibWrapper(pysvn.Client())
@LazyProperty
def _url(self):
return 'file://%s%s' % (self._repo.fs_path, self._repo.name)
def shorthand_for_commit(self, oid):
return '[r%d]' % self._revno(self.rev_parse(oid))
def url_for_commit(self, commit, url_type=None):
if hasattr(commit, '_id'):
object_id = commit._id
elif commit == self._repo.app.default_branch_name:
object_id = commit
else:
object_id = self.rev_parse(commit)
if ':' in object_id:
object_id = str(self._revno(object_id))
return os.path.join(self._repo.url(), object_id) + '/'
def init(self, default_dirs=False, skip_special_files=False):
fullname = self._setup_paths()
log.info('svn init %s', fullname)
if os.path.exists(fullname):
shutil.rmtree(fullname)
subprocess.call(['svnadmin', 'create', self._repo.name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self._repo.fs_path)
if not skip_special_files:
self._setup_special_files()
self._repo.set_status('ready')
# make first commit with dir structure
if default_dirs:
tmp_working_dir = tempfile.mkdtemp(prefix='allura-svn-r1-',
dir=tg.config.get('scm.svn.tmpdir', g.tmpdir))
log.info('tmp dir = %s', tmp_working_dir)
self._repo._impl._svn.checkout(
'file://' + fullname, tmp_working_dir)
os.mkdir(tmp_working_dir + '/trunk')
os.mkdir(tmp_working_dir + '/tags')
os.mkdir(tmp_working_dir + '/branches')
self._repo._impl._svn.add(tmp_working_dir + '/trunk')
self._repo._impl._svn.add(tmp_working_dir + '/tags')
self._repo._impl._svn.add(tmp_working_dir + '/branches')
self._repo._impl._svn.checkin([tmp_working_dir + '/trunk',
tmp_working_dir + '/tags',
tmp_working_dir + '/branches'],
'Initial commit')
shutil.rmtree(tmp_working_dir)
log.info('deleted %s', tmp_working_dir)
def can_hotcopy(self, source_url):
if not (asbool(tg.config.get('scm.svn.hotcopy', True)) and
source_url.startswith('file://')):
return False
# check for svn version 1.7 or later
stdout, stderr, returncode = self.check_call(['svn', '--version'])
pattern = r'version (?P<maj>\d+)\.(?P<min>\d+)'
m = re.search(pattern, six.ensure_text(stdout))
return m and (int(m.group('maj')) * 10 + int(m.group('min'))) >= 17
def check_call(self, cmd, fail_on_error=True):
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(input=b'p\n')
if p.returncode != 0 and fail_on_error:
self._repo.set_status('ready')
raise SVNCalledProcessError(cmd, p.returncode, stdout, stderr)
return stdout, stderr, p.returncode
def clone_from(self, source_url):
'''Initialize a repo as a clone of another using svnsync'''
self.init(skip_special_files=True)
def set_hook(hook_name):
fn = os.path.join(self._repo.fs_path, self._repo.name,
'hooks', hook_name)
with open(fn, 'w') as fp:
fp.write('#!/bin/sh\n')
os.chmod(fn, 0o755)
def clear_hook(hook_name):
fn = os.path.join(self._repo.fs_path, self._repo.name,
'hooks', hook_name)
os.remove(fn)
self._repo.set_status('importing')
log.info('Initialize %r as a clone of %s',
self._repo, source_url)
if self.can_hotcopy(source_url):
log.info('... cloning %s via hotcopy', source_url)
# src repo is on the local filesystem - use hotcopy (faster)
source_path, dest_path = source_url[7:], self._url[7:]
fullname = os.path.join(self._repo.fs_path, self._repo.name)
# hotcopy expects dest dir to not exist yet
if os.path.exists(fullname):
shutil.rmtree(fullname)
self.check_call(['svnadmin', 'hotcopy', source_path, dest_path])
# make sure new repo has a pre-revprop-change hook,
# otherwise the sync will fail
set_hook('pre-revprop-change')
self.check_call(
['svnsync', '--non-interactive', '--allow-non-empty',
'initialize', self._url, source_url])
clear_hook('pre-revprop-change')
else:
def retry_cmd(cmd, fail_count=0):
max_fail = asint(tg.config.get('scm.import.retry_count', 50))
returncode = -1
while returncode != 0 and fail_count < max_fail:
stdout, stderr, returncode = self.check_call(cmd, fail_on_error=False)
if returncode != 0:
fail_count += 1
log.info('Attempt %s. Error running %s Details:\n%s', fail_count, cmd, stderr)
time.sleep(asint(tg.config.get('scm.import.retry_sleep_secs', 5)))
if fail_count == max_fail:
raise SVNCalledProcessError(cmd, returncode, stdout, stderr)
return fail_count
set_hook('pre-revprop-change')
fail_count = retry_cmd(['svnsync', 'init', self._url, source_url])
fail_count = retry_cmd(['svnsync', '--non-interactive', 'sync', self._url], fail_count=fail_count)
clear_hook('pre-revprop-change')
log.info('... %r cloned', self._repo)
self.update_checkout_url()
self._setup_special_files(source_url)
def update_checkout_url(self):
"""Validate the current ``checkout_url`` against the on-disk repo,
and change it if necessary.
If ``checkout_url`` is valid and not '', no changes are made.
If ``checkout_url`` is invalid or '':
- Set it to 'trunk' if repo has a top-level trunk directory
- Else, set it to ''
"""
opts = self._repo.app.config.options
if not svn_path_exists('file://{0}{1}/{2}'.format(self._repo.fs_path,
self._repo.name, opts['checkout_url'])):
opts['checkout_url'] = ''
if (not opts['checkout_url'] and
svn_path_exists(
'file://{0}{1}/trunk'.format(self._repo.fs_path,
self._repo.name))):
opts['checkout_url'] = 'trunk'
def commit(self, rev):
oid = self.rev_parse(rev)
result = M.repository.Commit.query.get(_id=oid)
if result:
result.set_context(self._repo)
return result
def rev_parse(self, rev):
if rev in ('HEAD', None):
return self._oid(self.head)
elif isinstance(rev, int) or rev.isdigit():
return self._oid(rev)
else:
return rev
def all_commit_ids(self):
"""Return a list of commit ids, starting with the head (most recent
commit) and ending with the root (first commit).
"""
head_revno = self.head
return list(map(self._oid, list(range(head_revno, 0, -1))))
def new_commits(self, all_commits=False):
head_revno = self.head
oids = [self._oid(revno) for revno in range(1, head_revno + 1)]
if all_commits:
return oids
# Find max commit id -- everything greater than that will be "unknown"
prefix = self._oid('')
q = M.repository.Commit.query.find(
dict(
type='commit',
_id={'$gt': prefix},
),
dict(_id=True)
)
seen_oids = set()
for d in q.ming_cursor.cursor:
oid = d['_id']
if not oid.startswith(prefix):
break
seen_oids.add(oid)
return [o for o in oids if o not in seen_oids]
def refresh_commit_info(self, oid, seen_object_ids, lazy=True):
from allura.model.repository import CommitDoc
ci_doc = CommitDoc.m.get(_id=oid)
if ci_doc and lazy:
return False
revno = self._revno(oid)
rev = self._revision(oid)
try:
log_entry = self._svn.log(
self._url,
revision_start=rev,
limit=1,
discover_changed_paths=True)[0]
except pysvn.ClientError:
log.info('ClientError processing %r %r, treating as empty',
oid, self._repo, exc_info=True)
log_entry = Object(date='', message='', changed_paths=[])
log_date = None
if hasattr(log_entry, 'date'):
log_date = datetime.utcfromtimestamp(log_entry.date)
user = Object(
name=h.really_unicode(log_entry.get('author', '--none--')),
email='',
date=log_date)
args = dict(
tree_id=None,
committed=user,
authored=user,
message=h.really_unicode(log_entry.get("message", "--none--")),
parent_ids=[],
child_ids=[])
if revno > 1:
args['parent_ids'] = [self._oid(revno - 1)]
if ci_doc:
ci_doc.update(**args)
ci_doc.m.save()
else:
ci_doc = CommitDoc(dict(args, _id=oid))
try:
ci_doc.m.insert()
except DuplicateKeyError:
if lazy:
return False
return True
def compute_tree_new(self, commit, tree_path='/'):
# always leading slash, never trailing
tree_path = '/' + tree_path.strip('/')
tree_id = self._tree_oid(commit._id, tree_path)
tree = RM.Tree.query.get(_id=tree_id)
if tree:
return tree_id
log.debug('Computing tree for %s: %s',
self._revno(commit._id), tree_path)
rev = self._revision(commit._id)
try:
infos = self._svn.info2(
self._url + tree_path,
revision=rev,
depth=pysvn.depth.immediates)
except pysvn.ClientError:
log.exception('Error computing tree for: %s: %s(%s)',
self._repo, commit, tree_path)
return None
log.debug('Compute tree for %d paths', len(infos))
tree_ids = []
blob_ids = []
lcd_entries = []
for path, info in infos[1:]:
if info.kind == pysvn.node_kind.dir:
tree_ids.append(Object(
id=self._tree_oid(commit._id, path),
name=path))
elif info.kind == pysvn.node_kind.file:
blob_ids.append(Object(
id=self._tree_oid(commit._id, path),
name=path))
else:
raise AssertionError()
lcd_entries.append(dict(
name=path,
commit_id=self._oid(info.last_changed_rev.number),
))
tree, is_new = RM.Tree.upsert(tree_id,
tree_ids=tree_ids,
blob_ids=blob_ids,
other_ids=[],
)
if is_new:
commit_id = self._oid(infos[0][1].last_changed_rev.number)
path = tree_path.strip('/')
RM.LastCommitDoc.m.update_partial(
{'commit_id': commit_id, 'path': path},
{'commit_id': commit_id, 'path':
path, 'entries': lcd_entries},
upsert=True)
return tree_id
def _tree_oid(self, commit_id, path):
data = 'tree\n%s\n%s' % (commit_id, h.really_unicode(path))
return sha1(data.encode('utf-8')).hexdigest()
def _blob_oid(self, commit_id, path):
data = 'blob\n%s\n%s' % (commit_id, h.really_unicode(path))
return sha1(data.encode('utf-8')).hexdigest()
def _obj_oid(self, commit_id, info):
path = info.URL[len(info.repos_root_URL):]
if info.kind == pysvn.node_kind.dir:
return self._tree_oid(commit_id, path)
else:
return self._blob_oid(commit_id, path)
def log(self, revs=None, path=None, exclude=None, id_only=True, limit=25, **kw):
"""
        Returns a generator that yields information about commits reachable
by revs.
revs can be None or a list or tuple of identifiers, each of which
can be anything parsable by self.commit(). If revs is None, the
default head will be used.
If path is not None, only commits which modify files under path
will be included.
Exclude can be None or a list or tuple of identifiers, each of which
can be anything parsable by self.commit(). If not None, then any
revisions reachable by any of the revisions in exclude will not be
included.
If id_only is True, returns only the commit ID, otherwise it returns
detailed information about each commit.
Since pysvn doesn't have a generator version of log, this tries to
balance pulling too much data from SVN with calling SVN too many
        times by pulling in pages of up to `limit` entries at a time.
"""
if revs is None:
revno = self.head
else:
revno = max([self._revno(self.rev_parse(r)) for r in revs])
if exclude is None:
exclude = 0
else:
exclude = max([self._revno(self.rev_parse(r)) for r in exclude])
if path is None:
url = self._url
else:
url = '/'.join([self._url, path.strip('/')])
while revno > exclude:
rev = pysvn.Revision(pysvn.opt_revision_kind.number, revno)
try:
logs = self._svn.log(
url, revision_start=rev, peg_revision=rev, limit=limit,
discover_changed_paths=True)
except pysvn.ClientError as e:
if 'Unable to connect' in e.args[0]:
raise # repo error
return # no (more) history for this path
for ci in logs:
if ci.revision.number <= exclude:
return
if id_only:
yield ci.revision.number
else:
yield self._map_log(ci, url, path)
if len(logs) < limit:
# we didn't get a full page, don't bother calling SVN again
return
revno = ci.revision.number - 1
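    # Illustrative usage (hypothetical names, not part of this module): given an
    # SVNImplementation instance `impl`, walk detailed info for the newest commits
    # touching 'trunk', stopping before anything reachable from r100:
    #   for entry in impl.log(revs=['HEAD'], path='trunk', exclude=[100],
    #                         id_only=False, limit=10):
    #       print(entry['id'], entry['message'])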
def _check_changed_path(self, changed_path, path):
if (changed_path['copyfrom_path'] and
changed_path['path'] and
path and
(len(changed_path['path']) < len(path)) and
path.startswith(changed_path['path'])):
changed_path['copyfrom_path'] = changed_path['copyfrom_path'] + \
path[len(changed_path['path']):]
changed_path['path'] = path
return changed_path
def _map_log(self, ci, url, path=None):
revno = ci.revision.number
rev = pysvn.Revision(pysvn.opt_revision_kind.number, revno)
size = None
if path:
try:
size = self._svn.list(url, revision=rev, peg_revision=rev)[0][0].size
except pysvn.ClientError:
pass
rename_details = {}
changed_paths = ci.get('changed_paths', [])
for changed_path in changed_paths:
changed_path = self._check_changed_path(changed_path, path)
if changed_path['copyfrom_path'] and changed_path['path'] == path and changed_path['action'] == 'A':
rename_details['path'] = changed_path['copyfrom_path']
rename_details['commit_url'] = self._repo.url_for_commit(
changed_path['copyfrom_revision'].number
)
break
return {
'id': revno,
'message': h.really_unicode(ci.get('message', '--none--')),
'authored': {
'name': h.really_unicode(ci.get('author', '--none--')),
'email': '',
'date': datetime.utcfromtimestamp(ci.date),
},
'committed': {
'name': h.really_unicode(ci.get('author', '--none--')),
'email': '',
'date': datetime.utcfromtimestamp(ci.date),
},
'refs': ['HEAD'] if revno == self.head else [],
'parents': [revno - 1] if revno > 1 else [],
'size': size,
'rename_details': rename_details,
}
def open_blob(self, blob):
data = self._svn.cat(
self._url + h.urlquote(blob.path()),
revision=self._revision(blob.commit._id))
return BytesIO(data)
def blob_size(self, blob):
try:
rev = self._revision(blob.commit._id)
data = self._svn.list(
self._url + blob.path(),
revision=rev,
peg_revision=rev,
dirent_fields=pysvn.SVN_DIRENT_SIZE)
except pysvn.ClientError:
log.info('ClientError getting filesize %r %r, returning 0',
blob.path(), self._repo, exc_info=True)
return 0
try:
size = data[0][0]['size']
except (IndexError, KeyError):
log.info(
'Error getting filesize: bad data from svn client %r %r, returning 0',
blob.path(), self._repo, exc_info=True)
size = 0
return size
def _setup_hooks(self, source_path=None):
'Set up the post-commit and pre-revprop-change hooks'
# setup a post-commit hook to notify Allura of changes to the repo
# the hook should also call the user-defined post-commit-user hook
text = self.post_receive_template.substitute(
url=self._repo.refresh_url())
fn = os.path.join(self._repo.fs_path, self._repo.name,
'hooks', 'post-commit')
with open(fn, 'w') as fp:
fp.write(text)
os.chmod(fn, 0o755)
def _revno(self, oid):
return int(oid.split(':')[1])
def _revision(self, oid):
return pysvn.Revision(
pysvn.opt_revision_kind.number,
self._revno(oid))
def _oid(self, revno):
return '%s:%s' % (self._repo._id, revno)
def last_commit_ids(self, commit, paths):
'''
Return a mapping {path: commit_id} of the _id of the last
commit to touch each path, starting from the given commit.
Since SVN Diffs are computed on-demand, we can't walk the
commit tree to find these. However, we can ask SVN for it
with a single call, so it shouldn't be too expensive.
NB: This assumes that all paths are direct children of a
single common parent path (i.e., you are only asking for
a subset of the nodes of a single tree, one level deep).
'''
if len(paths) == 1:
tree_path = '/' + os.path.dirname(paths[0].strip('/'))
else:
# always leading slash, never trailing
tree_path = '/' + os.path.commonprefix(paths).strip('/')
paths = [path.strip('/') for path in paths]
rev = self._revision(commit._id)
try:
infos = self._svn.info2(
self._url + tree_path,
revision=rev,
depth=pysvn.depth.immediates)
except pysvn.ClientError:
log.exception('Error computing tree for: %s: %s(%s)',
self._repo, commit, tree_path)
return None
entries = {}
for path, info in infos[1:]:
path = os.path.join(tree_path, path).strip('/')
if path in paths:
entries[path] = self._oid(info.last_changed_rev.number)
return entries
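    # Illustrative example (hypothetical values): for sibling files under one tree,
    #   impl.last_commit_ids(commit, ['trunk/a.txt', 'trunk/b.txt'])
    # returns a dict such as {'trunk/a.txt': '<repo_id>:42', 'trunk/b.txt': '<repo_id>:40'},
    # mapping each path to the oid of the last revision that touched it.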
def get_changes(self, oid):
rev = self._revision(oid)
try:
log_entry = self._svn.log(
self._url,
revision_start=rev,
limit=1,
discover_changed_paths=True)[0]
except pysvn.ClientError:
log.info('ClientError processing %r %r, treating as empty',
oid, self._repo, exc_info=True)
log_entry = Object(date='', message='', changed_paths=[])
return [p.path for p in log_entry.changed_paths]
def _tarball_path_clean(self, path, rev=None):
if path:
return path.strip('/')
else:
trunk_exists = svn_path_exists('file://%s%s/%s' % (self._repo.fs_path, self._repo.name, 'trunk'), rev)
if trunk_exists:
return 'trunk'
return ''
def tarball(self, commit, path=None):
"""
Makes a svn export at `tmpdest`
then zips that into `dest/tmpfilename`
then renames that to `dest/filename`
"""
path = self._tarball_path_clean(path, commit)
if not os.path.exists(self._repo.tarball_path):
os.makedirs(self._repo.tarball_path)
if not os.path.exists(self._repo.tarball_tmpdir):
os.makedirs(self._repo.tarball_tmpdir)
archive_name = self._repo.tarball_filename(commit, path)
dest = os.path.join(self._repo.tarball_path, archive_name)
tmpdest = os.path.join(self._repo.tarball_tmpdir, archive_name)
filename = os.path.join(self._repo.tarball_path, '%s%s' % (archive_name, '.zip')).encode('utf-8')
tmpfilename = os.path.join(self._repo.tarball_path, '%s%s' % (archive_name, '.tmp')).encode('utf-8')
rmtree(dest.encode('utf8'), ignore_errors=True) # must encode into bytes or it'll fail on non-ascii filenames
rmtree(tmpdest.encode('utf8'), ignore_errors=True)
path = os.path.join(self._url, path)
try:
# need to set system locale to handle all symbols in filename
import locale
locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
self._svn.export(path,
tmpdest,
revision=pysvn.Revision(
pysvn.opt_revision_kind.number, commit),
ignore_externals=True)
zipdir(tmpdest, tmpfilename)
os.rename(tmpfilename, filename)
finally:
rmtree(dest.encode('utf8'), ignore_errors=True)
rmtree(tmpdest.encode('utf8'), ignore_errors=True)
if os.path.exists(tmpfilename):
os.remove(tmpfilename)
def is_empty(self):
return self.head == 0
def is_file(self, path, rev=None):
url = '/'.join([self._url, path.strip('/')])
rev = pysvn.Revision(pysvn.opt_revision_kind.number,
self._revno(self.rev_parse(rev)))
try:
info = self._svn.list(
url, revision=rev, peg_revision=rev, dirent_fields=pysvn.SVN_DIRENT_KIND)[0][0]
return info.kind == pysvn.node_kind.file
except pysvn.ClientError:
return False
def symbolics_for_commit(self, commit):
return [], []
@LazyProperty
def head(self):
try:
return int(self._svn.revpropget('revision', url=self._url)[0].number)
except pysvn.ClientError as e:
error_lines = str(e).splitlines()
if all(errline.startswith(("Unable to connect", "Unable to open")) for errline in error_lines):
# simple common error e.g. empty repo directory
return 0
else:
raise
@LazyProperty
def heads(self):
return [Object(name=None, object_id=self._oid(self.head))]
@LazyProperty
def branches(self):
return []
@LazyProperty
def tags(self):
return []
def paged_diffs(self, commit_id, start=0, end=None, onlyChangedFiles=False):
result = {'added': [], 'removed': [], 'changed': [], 'copied': [], 'renamed': [], 'total': 0}
rev = self._revision(commit_id)
try:
log_info = self._svn.log(
self._url,
revision_start=rev,
revision_end=rev,
discover_changed_paths=True)
except pysvn.ClientError:
log.info('Error getting paged_diffs log of %s on %s',
commit_id, self._url, exc_info=True)
return result
if len(log_info) == 0:
return result
paths = sorted(log_info[0].changed_paths, key=op.itemgetter('path'))
result['total'] = len(paths)
for p in paths[start:end]:
if p['copyfrom_path'] is not None:
result['copied'].append({
'new': h.really_unicode(p.path),
'old': h.really_unicode(p.copyfrom_path),
'ratio': 1,
})
elif p['action'] == 'A':
result['added'].append(h.really_unicode(p.path))
elif p['action'] == 'D':
result['removed'].append(h.really_unicode(p.path))
elif p['action'] in ['M', 'R']:
# 'R' means 'Replaced', i.e.
# svn rm aaa.txt
# echo "Completely new aaa!" > aaa.txt
# svn add aaa.txt
# svn commit -m "Replace aaa.txt"
result['changed'].append(h.really_unicode(p.path))
for r in result['copied'][:]:
if r['old'] in result['removed']:
result['removed'].remove(r['old'])
result['copied'].remove(r)
result['renamed'].append(r)
if r['new'] in result['added']:
result['added'].remove(r['new'])
return result
Mapper.compile_all()
| apache-2.0 | -9,013,672,863,622,275,000 | 37.428571 | 118 | 0.540251 | false |
TheMOOCAgency/edx-platform | lms/djangoapps/grades/config/forms.py | 16 | 1207 | """
Defines a form for providing validation of subsection grade templates.
"""
import logging
from django import forms
from lms.djangoapps.grades.config.models import CoursePersistentGradesFlag
from opaque_keys import InvalidKeyError
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locator import CourseLocator
log = logging.getLogger(__name__)
class CoursePersistentGradesAdminForm(forms.ModelForm):
"""Input form for subsection grade enabling, allowing us to verify data."""
class Meta(object):
model = CoursePersistentGradesFlag
fields = '__all__'
def clean_course_id(self):
"""Validate the course id"""
cleaned_id = self.cleaned_data["course_id"]
try:
course_key = CourseLocator.from_string(cleaned_id)
except InvalidKeyError:
msg = u'Course id invalid. Entered course id was: "{0}."'.format(cleaned_id)
raise forms.ValidationError(msg)
if not modulestore().has_course(course_key):
msg = u'Course not found. Entered course id was: "{0}". '.format(course_key.to_deprecated_string())
raise forms.ValidationError(msg)
return course_key
| agpl-3.0 | -1,398,700,677,401,873,200 | 31.621622 | 111 | 0.690141 | false |
mayblue9/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
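# Example usage (a sketch): time two metrics over two formats; the result array is
# indexed as (metric, format, samples, classes, density).
#   times = benchmark(metrics=[METRICS['f1'], METRICS['hamming']],
#                     formats=[FORMATS['dense'], FORMATS['csr']],
#                     samples=500, classes=5, density=.2)
#   assert times.shape == (2, 2, 1, 1, 1)  # seconds per metric/format combination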
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause | 2,424,201,460,111,960,000 | 36.568421 | 79 | 0.566545 | false |
nearlyfreeapps/python-googleadwords | examples/adspygoogle/adwords/v201109/basic_operations/get_campaigns.py | 1 | 2192 | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all campaigns. To add a campaign, run add_campaign.py.
Tags: CampaignService.get
"""
__author__ = '[email protected] (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
PAGE_SIZE = 100
def main(client):
# Initialize appropriate service.
campaign_service = client.GetCampaignService(
'https://adwords-sandbox.google.com', 'v201109')
# Construct selector and get all campaigns.
offset = 0
selector = {
'fields': ['Id', 'Name', 'Status'],
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)
}
}
more_pages = True
while more_pages:
page = campaign_service.Get(selector)[0]
# Display results.
if 'entries' in page:
for campaign in page['entries']:
print ('Campaign with id \'%s\', name \'%s\', and status \'%s\' was '
'found.' % (campaign['id'], campaign['name'],
campaign['status']))
else:
print 'No campaigns were found.'
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client)
| apache-2.0 | -3,083,286,908,601,078,000 | 28.226667 | 77 | 0.632299 | false |
klenks/jobsportal | venv/bin/activate_this.py | 1076 | 1137 | """By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.
This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""
try:
__file__
except NameError:
raise AssertionError(
"You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os
old_os_path = os.environ.get('PATH', '')
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if sys.platform == 'win32':
site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
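# Illustrative usage sketch (not part of the original file); the path below is
# hypothetical:
#
#   activate_this = '/path/to/venv/bin/activate_this.py'
#   execfile(activate_this, dict(__file__=activate_this))   # Python 2, as in the docstring
#   # On Python 3, where execfile() no longer exists:
#   # exec(open(activate_this).read(), dict(__file__=activate_this))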
| mit | -8,731,092,708,964,023,000 | 32.441176 | 113 | 0.686895 | false |
40223133/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/xml/dom/minidom.py | 727 | 66854 | """Simple implementation of the Level 1 DOM.
Namespaces and other minor Level 2 features are also supported.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import io
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
# the node being added or removed, not the node being modified.)
#
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
xml.dom.Node.ENTITY_REFERENCE_NODE)
class Node(xml.dom.Node):
namespaceURI = None # this is non-null only for elements and attributes
parentNode = None
ownerDocument = None
nextSibling = None
previousSibling = None
prefix = EMPTY_PREFIX # non-null only for NS elements and attributes
def __bool__(self):
return True
def toxml(self, encoding=None):
return self.toprettyxml("", "", encoding)
def toprettyxml(self, indent="\t", newl="\n", encoding=None):
if encoding is None:
writer = io.StringIO()
else:
writer = io.TextIOWrapper(io.BytesIO(),
encoding=encoding,
errors="xmlcharrefreplace",
newline='\n')
if self.nodeType == Node.DOCUMENT_NODE:
# Can pass encoding only to document, to put it into XML header
self.writexml(writer, "", indent, newl, encoding)
else:
self.writexml(writer, "", indent, newl)
if encoding is None:
return writer.getvalue()
else:
return writer.detach().getvalue()
def hasChildNodes(self):
return bool(self.childNodes)
def _get_childNodes(self):
return self.childNodes
def _get_firstChild(self):
if self.childNodes:
return self.childNodes[0]
def _get_lastChild(self):
if self.childNodes:
return self.childNodes[-1]
def insertBefore(self, newChild, refChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(newChild.childNodes):
self.insertBefore(c, refChild)
### The DOM does not clearly specify what to return in this case
return newChild
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
if refChild is None:
self.appendChild(newChild)
else:
try:
index = self.childNodes.index(refChild)
except ValueError:
raise xml.dom.NotFoundErr()
if newChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
self.childNodes.insert(index, newChild)
newChild.nextSibling = refChild
refChild.previousSibling = newChild
if index:
node = self.childNodes[index-1]
node.nextSibling = newChild
newChild.previousSibling = node
else:
newChild.previousSibling = None
newChild.parentNode = self
return newChild
def appendChild(self, node):
if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(node.childNodes):
self.appendChild(c)
### The DOM does not clearly specify what to return in this case
return node
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
elif node.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
if node.parentNode is not None:
node.parentNode.removeChild(node)
_append_child(self, node)
node.nextSibling = None
return node
def replaceChild(self, newChild, oldChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
refChild = oldChild.nextSibling
self.removeChild(oldChild)
return self.insertBefore(newChild, refChild)
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild is oldChild:
return
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
try:
index = self.childNodes.index(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
self.childNodes[index] = newChild
newChild.parentNode = self
oldChild.parentNode = None
if (newChild.nodeType in _nodeTypes_with_children
or oldChild.nodeType in _nodeTypes_with_children):
_clear_id_cache(self)
newChild.nextSibling = oldChild.nextSibling
newChild.previousSibling = oldChild.previousSibling
oldChild.nextSibling = None
oldChild.previousSibling = None
if newChild.previousSibling:
newChild.previousSibling.nextSibling = newChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
return oldChild
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
if oldChild.nextSibling is not None:
oldChild.nextSibling.previousSibling = oldChild.previousSibling
if oldChild.previousSibling is not None:
oldChild.previousSibling.nextSibling = oldChild.nextSibling
oldChild.nextSibling = oldChild.previousSibling = None
if oldChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
oldChild.parentNode = None
return oldChild
def normalize(self):
L = []
for child in self.childNodes:
if child.nodeType == Node.TEXT_NODE:
if not child.data:
# empty text node; discard
if L:
L[-1].nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = child.previousSibling
child.unlink()
elif L and L[-1].nodeType == child.nodeType:
# collapse text node
node = L[-1]
node.data = node.data + child.data
node.nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = node
child.unlink()
else:
L.append(child)
else:
L.append(child)
if child.nodeType == Node.ELEMENT_NODE:
child.normalize()
self.childNodes[:] = L
def cloneNode(self, deep):
return _clone_node(self, deep, self.ownerDocument or self)
def isSupported(self, feature, version):
return self.ownerDocument.implementation.hasFeature(feature, version)
def _get_localName(self):
# Overridden in Element and Attr where localName can be Non-Null
return None
# Node interfaces from Level 3 (WD 9 April 2002)
def isSameNode(self, other):
return self is other
def getInterface(self, feature):
if self.isSupported(feature, None):
return self
else:
return None
# The "user data" functions use a dictionary that is only present
# if some user data has been set, so be careful not to assume it
# exists.
def getUserData(self, key):
try:
return self._user_data[key][0]
except (AttributeError, KeyError):
return None
def setUserData(self, key, data, handler):
old = None
try:
d = self._user_data
except AttributeError:
d = {}
self._user_data = d
if key in d:
old = d[key][0]
if data is None:
# ignore handlers passed for None
handler = None
if old is not None:
del d[key]
else:
d[key] = (data, handler)
return old
def _call_user_data_handler(self, operation, src, dst):
if hasattr(self, "_user_data"):
for key, (data, handler) in list(self._user_data.items()):
if handler is not None:
handler.handle(operation, key, data, src, dst)
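    # Illustrative sketch (not part of the original source): setUserData() and
    # getUserData() attach arbitrary values to a node, e.g.
    #
    #   node.setUserData('origin', 'imported', None)   # handler may be None
    #   node.getUserData('origin')                      # -> 'imported'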
# minidom-specific API:
def unlink(self):
self.parentNode = self.ownerDocument = None
if self.childNodes:
for child in self.childNodes:
child.unlink()
self.childNodes = NodeList()
self.previousSibling = None
self.nextSibling = None
# A Node is its own context manager, to ensure that an unlink() call occurs.
# This is similar to how a file object works.
def __enter__(self):
return self
def __exit__(self, et, ev, tb):
self.unlink()
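    # Illustrative sketch (not part of the original source): because __exit__ calls
    # unlink(), a parsed document can be used as a context manager, e.g.
    #
    #   with xml.dom.minidom.parse('foo.xml') as doc:
    #       root = doc.documentElement
    #   # doc.unlink() has run here, breaking internal reference cycles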
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
defproperty(Node, "localName", doc="Namespace-local name of this node.")
def _append_child(self, node):
# fast path with less checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.previousSibling = last
last.nextSibling = node
childNodes.append(node)
node.parentNode = self
def _in_document(node):
# return True iff node is part of a document tree
while node is not None:
if node.nodeType == Node.DOCUMENT_NODE:
return True
node = node.parentNode
return False
def _write_data(writer, data):
"Writes datachars to writer."
if data:
data = data.replace("&", "&").replace("<", "<"). \
replace("\"", """).replace(">", ">")
writer.write(data)
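# Illustrative sketch (not part of the original source): _write_data() escapes the
# four characters that are special in XML character data, so the input
# 'a < b & "c" > d' is written out as 'a &lt; b &amp; &quot;c&quot; &gt; d'.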
def _get_elements_by_tagName_helper(parent, name, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE and \
(name == "*" or node.tagName == name):
rc.append(node)
_get_elements_by_tagName_helper(node, name, rc)
return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
if ((localName == "*" or node.localName == localName) and
(nsURI == "*" or node.namespaceURI == nsURI)):
rc.append(node)
_get_elements_by_tagName_ns_helper(node, nsURI, localName, rc)
return rc
class DocumentFragment(Node):
nodeType = Node.DOCUMENT_FRAGMENT_NODE
nodeName = "#document-fragment"
nodeValue = None
attributes = None
parentNode = None
_child_node_types = (Node.ELEMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.NOTATION_NODE)
def __init__(self):
self.childNodes = NodeList()
class Attr(Node):
__slots__=('_name', '_value', 'namespaceURI',
'_prefix', 'childNodes', '_localName', 'ownerDocument', 'ownerElement')
nodeType = Node.ATTRIBUTE_NODE
attributes = None
specified = False
_is_id = False
_child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
prefix=None):
self.ownerElement = None
self._name = qName
self.namespaceURI = namespaceURI
self._prefix = prefix
self.childNodes = NodeList()
# Add the single child node that represents the value of the attr
self.childNodes.append(Text())
# nodeValue and value are set elsewhere
def _get_localName(self):
try:
return self._localName
except AttributeError:
return self.nodeName.split(":", 1)[-1]
def _get_name(self):
return self.name
def _get_specified(self):
return self.specified
def _get_name(self):
return self._name
def _set_name(self, value):
self._name = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
nodeName = name = property(_get_name, _set_name)
def _get_value(self):
return self._value
def _set_value(self, value):
self._value = value
self.childNodes[0].data = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
nodeValue = value = property(_get_value, _set_value)
def _get_prefix(self):
return self._prefix
def _set_prefix(self, prefix):
nsuri = self.namespaceURI
if prefix == "xmlns":
if nsuri and nsuri != XMLNS_NAMESPACE:
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix for the wrong namespace")
self._prefix = prefix
if prefix is None:
newName = self.localName
else:
newName = "%s:%s" % (prefix, self.localName)
if self.ownerElement:
_clear_id_cache(self.ownerElement)
self.name = newName
prefix = property(_get_prefix, _set_prefix)
def unlink(self):
# This implementation does not call the base implementation
# since most of that is not needed, and the expense of the
# method call is not warranted. We duplicate the removal of
# children, but that's all we needed from the base class.
elem = self.ownerElement
if elem is not None:
del elem._attrs[self.nodeName]
del elem._attrsNS[(self.namespaceURI, self.localName)]
if self._is_id:
self._is_id = False
elem._magic_id_nodes -= 1
self.ownerDocument._magic_id_count -= 1
for child in self.childNodes:
child.unlink()
del self.childNodes[:]
def _get_isId(self):
if self._is_id:
return True
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return False
info = doc._get_elem_info(elem)
if info is None:
return False
if self.namespaceURI:
return info.isIdNS(self.namespaceURI, self.localName)
else:
return info.isId(self.nodeName)
def _get_schemaType(self):
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return _no_type
info = doc._get_elem_info(elem)
if info is None:
return _no_type
if self.namespaceURI:
return info.getAttributeTypeNS(self.namespaceURI, self.localName)
else:
return info.getAttributeType(self.nodeName)
defproperty(Attr, "isId", doc="True if this attribute is an ID.")
defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
class NamedNodeMap(object):
"""The attribute list is a transient interface to the underlying
dictionaries. Mutations here will change the underlying element's
dictionary.
Ordering is imposed artificially and does not reflect the order of
attributes as found in an input document.
"""
__slots__ = ('_attrs', '_attrsNS', '_ownerElement')
def __init__(self, attrs, attrsNS, ownerElement):
self._attrs = attrs
self._attrsNS = attrsNS
self._ownerElement = ownerElement
def _get_length(self):
return len(self._attrs)
def item(self, index):
try:
return self[list(self._attrs.keys())[index]]
except IndexError:
return None
def items(self):
L = []
for node in self._attrs.values():
L.append((node.nodeName, node.value))
return L
def itemsNS(self):
L = []
for node in self._attrs.values():
L.append(((node.namespaceURI, node.localName), node.value))
return L
def __contains__(self, key):
if isinstance(key, str):
return key in self._attrs
else:
return key in self._attrsNS
def keys(self):
return self._attrs.keys()
def keysNS(self):
return self._attrsNS.keys()
def values(self):
return self._attrs.values()
def get(self, name, value=None):
return self._attrs.get(name, value)
__len__ = _get_length
def _cmp(self, other):
if self._attrs is getattr(other, "_attrs", None):
return 0
else:
return (id(self) > id(other)) - (id(self) < id(other))
def __eq__(self, other):
return self._cmp(other) == 0
def __ge__(self, other):
return self._cmp(other) >= 0
def __gt__(self, other):
return self._cmp(other) > 0
def __le__(self, other):
return self._cmp(other) <= 0
def __lt__(self, other):
return self._cmp(other) < 0
def __ne__(self, other):
return self._cmp(other) != 0
def __getitem__(self, attname_or_tuple):
if isinstance(attname_or_tuple, tuple):
return self._attrsNS[attname_or_tuple]
else:
return self._attrs[attname_or_tuple]
# same as set
def __setitem__(self, attname, value):
if isinstance(value, str):
try:
node = self._attrs[attname]
except KeyError:
node = Attr(attname)
node.ownerDocument = self._ownerElement.ownerDocument
self.setNamedItem(node)
node.value = value
else:
if not isinstance(value, Attr):
raise TypeError("value must be a string or Attr object")
node = value
self.setNamedItem(node)
def getNamedItem(self, name):
try:
return self._attrs[name]
except KeyError:
return None
def getNamedItemNS(self, namespaceURI, localName):
try:
return self._attrsNS[(namespaceURI, localName)]
except KeyError:
return None
def removeNamedItem(self, name):
n = self.getNamedItem(name)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrs[n.nodeName]
del self._attrsNS[(n.namespaceURI, n.localName)]
if hasattr(n, 'ownerElement'):
n.ownerElement = None
return n
else:
raise xml.dom.NotFoundErr()
def removeNamedItemNS(self, namespaceURI, localName):
n = self.getNamedItemNS(namespaceURI, localName)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrsNS[(n.namespaceURI, n.localName)]
del self._attrs[n.nodeName]
if hasattr(n, 'ownerElement'):
n.ownerElement = None
return n
else:
raise xml.dom.NotFoundErr()
def setNamedItem(self, node):
if not isinstance(node, Attr):
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
old = self._attrs.get(node.name)
if old:
old.unlink()
self._attrs[node.name] = node
self._attrsNS[(node.namespaceURI, node.localName)] = node
node.ownerElement = self._ownerElement
_clear_id_cache(node.ownerElement)
return old
def setNamedItemNS(self, node):
return self.setNamedItem(node)
def __delitem__(self, attname_or_tuple):
node = self[attname_or_tuple]
_clear_id_cache(node.ownerElement)
node.unlink()
def __getstate__(self):
return self._attrs, self._attrsNS, self._ownerElement
def __setstate__(self, state):
self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, "length",
doc="Number of nodes in the NamedNodeMap.")
AttributeList = NamedNodeMap
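# Illustrative sketch (not part of the original source): a NamedNodeMap is what
# Element.attributes returns, and writing to it mutates the owning element, e.g.
#
#   doc = parseString('<p class="x"/>')    # parseString() is defined further below
#   attrs = doc.documentElement.attributes
#   attrs['class'].value                   # -> 'x'
#   attrs['id'] = 'p1'                     # creates a new 'id' attribute on <p>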
class TypeInfo(object):
__slots__ = 'namespace', 'name'
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
if self.namespace:
return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
else:
return "<TypeInfo %r>" % self.name
def _get_name(self):
return self.name
def _get_namespace(self):
return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
__slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix',
'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS',
'nextSibling', 'previousSibling')
nodeType = Node.ELEMENT_NODE
nodeValue = None
schemaType = _no_type
_magic_id_nodes = 0
_child_node_types = (Node.ELEMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE)
def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
localName=None):
self.parentNode = None
self.tagName = self.nodeName = tagName
self.prefix = prefix
self.namespaceURI = namespaceURI
self.childNodes = NodeList()
self.nextSibling = self.previousSibling = None
# Attribute dictionaries are lazily created
# attributes are double-indexed:
# tagName -> Attribute
# URI,localName -> Attribute
# in the future: consider lazy generation
# of attribute objects this is too tricky
# for now because of headaches with
# namespaces.
self._attrs = None
self._attrsNS = None
def _ensure_attributes(self):
if self._attrs is None:
self._attrs = {}
self._attrsNS = {}
def _get_localName(self):
try:
return self._localName
except AttributeError:
return self.tagName.split(":", 1)[-1]
def _get_tagName(self):
return self.tagName
def unlink(self):
if self._attrs is not None:
for attr in list(self._attrs.values()):
attr.unlink()
self._attrs = None
self._attrsNS = None
Node.unlink(self)
def getAttribute(self, attname):
if self._attrs is None:
return ""
try:
return self._attrs[attname].value
except KeyError:
return ""
def getAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return ""
try:
return self._attrsNS[(namespaceURI, localName)].value
except KeyError:
return ""
def setAttribute(self, attname, value):
attr = self.getAttributeNode(attname)
if attr is None:
attr = Attr(attname)
attr.value = value # also sets nodeValue
attr.ownerDocument = self.ownerDocument
self.setAttributeNode(attr)
elif value != attr.value:
attr.value = value
if attr.isId:
_clear_id_cache(self)
def setAttributeNS(self, namespaceURI, qualifiedName, value):
prefix, localname = _nssplit(qualifiedName)
attr = self.getAttributeNodeNS(namespaceURI, localname)
if attr is None:
attr = Attr(qualifiedName, namespaceURI, localname, prefix)
attr.value = value
attr.ownerDocument = self.ownerDocument
self.setAttributeNode(attr)
else:
if value != attr.value:
attr.value = value
if attr.isId:
_clear_id_cache(self)
if attr.prefix != prefix:
attr.prefix = prefix
attr.nodeName = qualifiedName
def getAttributeNode(self, attrname):
if self._attrs is None:
return None
return self._attrs.get(attrname)
def getAttributeNodeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return None
return self._attrsNS.get((namespaceURI, localName))
def setAttributeNode(self, attr):
if attr.ownerElement not in (None, self):
raise xml.dom.InuseAttributeErr("attribute node already owned")
self._ensure_attributes()
old1 = self._attrs.get(attr.name, None)
if old1 is not None:
self.removeAttributeNode(old1)
old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
if old2 is not None and old2 is not old1:
self.removeAttributeNode(old2)
_set_attribute_node(self, attr)
if old1 is not attr:
# It might have already been part of this node, in which case
# it doesn't represent a change, and should not be returned.
return old1
if old2 is not attr:
return old2
setAttributeNodeNS = setAttributeNode
def removeAttribute(self, name):
if self._attrsNS is None:
raise xml.dom.NotFoundErr()
try:
attr = self._attrs[name]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
raise xml.dom.NotFoundErr()
try:
attr = self._attrsNS[(namespaceURI, localName)]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNode(self, node):
if node is None:
raise xml.dom.NotFoundErr()
try:
self._attrs[node.name]
except KeyError:
raise xml.dom.NotFoundErr()
_clear_id_cache(self)
node.unlink()
# Restore this since the node is still useful and otherwise
# unlinked
node.ownerDocument = self.ownerDocument
removeAttributeNodeNS = removeAttributeNode
def hasAttribute(self, name):
if self._attrs is None:
return False
return name in self._attrs
def hasAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return False
return (namespaceURI, localName) in self._attrsNS
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def __repr__(self):
return "<DOM Element: %s at %#x>" % (self.tagName, id(self))
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = sorted(attrs.keys())
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and
self.childNodes[0].nodeType == Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(writer, indent+addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s"%(newl))
def _get_attributes(self):
self._ensure_attributes()
return NamedNodeMap(self._attrs, self._attrsNS, self)
def hasAttributes(self):
if self._attrs:
return True
else:
return False
# DOM Level 3 attributes, based on the 22 Oct 2002 draft
def setIdAttribute(self, name):
idAttr = self.getAttributeNode(name)
self.setIdAttributeNode(idAttr)
def setIdAttributeNS(self, namespaceURI, localName):
idAttr = self.getAttributeNodeNS(namespaceURI, localName)
self.setIdAttributeNode(idAttr)
def setIdAttributeNode(self, idAttr):
if idAttr is None or not self.isSameNode(idAttr.ownerElement):
raise xml.dom.NotFoundErr()
if _get_containing_entref(self) is not None:
raise xml.dom.NoModificationAllowedErr()
if not idAttr._is_id:
idAttr._is_id = True
self._magic_id_nodes += 1
self.ownerDocument._magic_id_count += 1
_clear_id_cache(self)
defproperty(Element, "attributes",
doc="NamedNodeMap of attributes on the element.")
defproperty(Element, "localName",
doc="Namespace-local name of this element.")
def _set_attribute_node(element, attr):
_clear_id_cache(element)
element._ensure_attributes()
element._attrs[attr.name] = attr
element._attrsNS[(attr.namespaceURI, attr.localName)] = attr
# This creates a circular reference, but Element.unlink()
# breaks the cycle since the references to the attribute
# dictionaries are tossed.
attr.ownerElement = element
class Childless:
"""Mixin that makes childless-ness easy to implement and avoids
the complexity of the Node methods that deal with children.
"""
__slots__ = ()
attributes = None
childNodes = EmptyNodeList()
firstChild = None
lastChild = None
def _get_firstChild(self):
return None
def _get_lastChild(self):
return None
def appendChild(self, node):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes cannot have children")
def hasChildNodes(self):
return False
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
def removeChild(self, oldChild):
raise xml.dom.NotFoundErr(
self.nodeName + " nodes do not have children")
def normalize(self):
# For childless nodes, normalize() has nothing to do.
pass
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
class ProcessingInstruction(Childless, Node):
nodeType = Node.PROCESSING_INSTRUCTION_NODE
__slots__ = ('target', 'data')
def __init__(self, target, data):
self.target = target
self.data = data
# nodeValue is an alias for data
def _get_nodeValue(self):
return self.data
def _set_nodeValue(self, value):
        self.data = value
nodeValue = property(_get_nodeValue, _set_nodeValue)
# nodeName is an alias for target
def _get_nodeName(self):
return self.target
def _set_nodeName(self, value):
self.target = value
nodeName = property(_get_nodeName, _set_nodeName)
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
class CharacterData(Childless, Node):
__slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling')
def __init__(self):
self.ownerDocument = self.parentNode = None
self.previousSibling = self.nextSibling = None
self._data = ''
Node.__init__(self)
def _get_length(self):
return len(self.data)
__len__ = _get_length
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
data = nodeValue = property(_get_data, _set_data)
def __repr__(self):
data = self.data
if len(data) > 10:
dotdotdot = "..."
else:
dotdotdot = ""
return '<DOM %s node "%r%s">' % (
self.__class__.__name__, data[0:10], dotdotdot)
def substringData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
return self.data[offset:offset+count]
def appendData(self, arg):
self.data = self.data + arg
def insertData(self, offset, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if arg:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset:])
def deleteData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = self.data[:offset] + self.data[offset+count:]
def replaceData(self, offset, count, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset+count:])
defproperty(CharacterData, "length", doc="Length of the string data.")
class Text(CharacterData):
__slots__ = ()
nodeType = Node.TEXT_NODE
nodeName = "#text"
attributes = None
def splitText(self, offset):
if offset < 0 or offset > len(self.data):
raise xml.dom.IndexSizeErr("illegal offset value")
newText = self.__class__()
newText.data = self.data[offset:]
newText.ownerDocument = self.ownerDocument
next = self.nextSibling
if self.parentNode and self in self.parentNode.childNodes:
if next is None:
self.parentNode.appendChild(newText)
else:
self.parentNode.insertBefore(newText, next)
self.data = self.data[:offset]
return newText
def writexml(self, writer, indent="", addindent="", newl=""):
_write_data(writer, "%s%s%s" % (indent, self.data, newl))
# DOM Level 3 (WD 9 April 2002)
def _get_wholeText(self):
L = [self.data]
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.insert(0, n.data)
n = n.previousSibling
else:
break
n = self.nextSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.append(n.data)
n = n.nextSibling
else:
break
return ''.join(L)
def replaceWholeText(self, content):
# XXX This needs to be seriously changed if minidom ever
# supports EntityReference nodes.
parent = self.parentNode
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.previousSibling
parent.removeChild(n)
n = next
else:
break
n = self.nextSibling
if not content:
parent.removeChild(self)
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.nextSibling
parent.removeChild(n)
n = next
else:
break
if content:
self.data = content
return self
else:
return None
def _get_isWhitespaceInElementContent(self):
if self.data.strip():
return False
elem = _get_containing_element(self)
if elem is None:
return False
info = self.ownerDocument._get_elem_info(elem)
if info is None:
return False
else:
return info.isElementContent()
defproperty(Text, "isWhitespaceInElementContent",
doc="True iff this text node contains only whitespace"
" and is in element content.")
defproperty(Text, "wholeText",
doc="The text of all logically-adjacent text nodes.")
def _get_containing_element(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ELEMENT_NODE:
return c
c = c.parentNode
return None
def _get_containing_entref(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ENTITY_REFERENCE_NODE:
return c
c = c.parentNode
return None
class Comment(CharacterData):
nodeType = Node.COMMENT_NODE
nodeName = "#comment"
def __init__(self, data):
CharacterData.__init__(self)
self._data = data
def writexml(self, writer, indent="", addindent="", newl=""):
if "--" in self.data:
raise ValueError("'--' is not allowed in a comment node")
writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
class CDATASection(Text):
__slots__ = ()
nodeType = Node.CDATA_SECTION_NODE
nodeName = "#cdata-section"
def writexml(self, writer, indent="", addindent="", newl=""):
if self.data.find("]]>") >= 0:
raise ValueError("']]>' not allowed in a CDATA section")
writer.write("<![CDATA[%s]]>" % self.data)
class ReadOnlySequentialNamedNodeMap(object):
__slots__ = '_seq',
def __init__(self, seq=()):
# seq should be a list or tuple
self._seq = seq
def __len__(self):
return len(self._seq)
def _get_length(self):
return len(self._seq)
def getNamedItem(self, name):
for n in self._seq:
if n.nodeName == name:
return n
def getNamedItemNS(self, namespaceURI, localName):
for n in self._seq:
if n.namespaceURI == namespaceURI and n.localName == localName:
return n
def __getitem__(self, name_or_tuple):
if isinstance(name_or_tuple, tuple):
node = self.getNamedItemNS(*name_or_tuple)
else:
node = self.getNamedItem(name_or_tuple)
if node is None:
raise KeyError(name_or_tuple)
return node
def item(self, index):
if index < 0:
return None
try:
return self._seq[index]
except IndexError:
return None
def removeNamedItem(self, name):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def removeNamedItemNS(self, namespaceURI, localName):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItem(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItemNS(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def __getstate__(self):
return [self._seq]
def __setstate__(self, state):
self._seq = state[0]
defproperty(ReadOnlySequentialNamedNodeMap, "length",
doc="Number of entries in the NamedNodeMap.")
class Identified:
"""Mix-in class that supports the publicId and systemId attributes."""
__slots__ = 'publicId', 'systemId'
def _identified_mixin_init(self, publicId, systemId):
self.publicId = publicId
self.systemId = systemId
def _get_publicId(self):
return self.publicId
def _get_systemId(self):
return self.systemId
class DocumentType(Identified, Childless, Node):
nodeType = Node.DOCUMENT_TYPE_NODE
nodeValue = None
name = None
publicId = None
systemId = None
internalSubset = None
def __init__(self, qualifiedName):
self.entities = ReadOnlySequentialNamedNodeMap()
self.notations = ReadOnlySequentialNamedNodeMap()
if qualifiedName:
prefix, localname = _nssplit(qualifiedName)
self.name = localname
self.nodeName = self.name
def _get_internalSubset(self):
return self.internalSubset
def cloneNode(self, deep):
if self.ownerDocument is None:
# it's ok
clone = DocumentType(None)
clone.name = self.name
clone.nodeName = self.name
operation = xml.dom.UserDataHandler.NODE_CLONED
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in self.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
clone.notations._seq.append(notation)
n._call_user_data_handler(operation, n, notation)
for e in self.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
clone.entities._seq.append(entity)
e._call_user_data_handler(operation, n, entity)
self._call_user_data_handler(operation, self, clone)
return clone
else:
return None
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("<!DOCTYPE ")
writer.write(self.name)
if self.publicId:
writer.write("%s PUBLIC '%s'%s '%s'"
% (newl, self.publicId, newl, self.systemId))
elif self.systemId:
writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
if self.internalSubset is not None:
writer.write(" [")
writer.write(self.internalSubset)
writer.write("]")
writer.write(">"+newl)
class Entity(Identified, Node):
attributes = None
nodeType = Node.ENTITY_NODE
nodeValue = None
actualEncoding = None
encoding = None
version = None
def __init__(self, name, publicId, systemId, notation):
self.nodeName = name
self.notationName = notation
self.childNodes = NodeList()
self._identified_mixin_init(publicId, systemId)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_encoding(self):
return self.encoding
def _get_version(self):
return self.version
def appendChild(self, newChild):
raise xml.dom.HierarchyRequestErr(
"cannot append children to an entity node")
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
"cannot insert children below an entity node")
def removeChild(self, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot remove children from an entity node")
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot replace children of an entity node")
class Notation(Identified, Childless, Node):
nodeType = Node.NOTATION_NODE
nodeValue = None
def __init__(self, name, publicId, systemId):
self.nodeName = name
self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
_features = [("core", "1.0"),
("core", "2.0"),
("core", None),
("xml", "1.0"),
("xml", "2.0"),
("xml", None),
("ls-load", "3.0"),
("ls-load", None),
]
def hasFeature(self, feature, version):
if version == "":
version = None
return (feature.lower(), version) in self._features
def createDocument(self, namespaceURI, qualifiedName, doctype):
if doctype and doctype.parentNode is not None:
raise xml.dom.WrongDocumentErr(
"doctype object owned by another DOM tree")
doc = self._create_document()
add_root_element = not (namespaceURI is None
and qualifiedName is None
and doctype is None)
if not qualifiedName and add_root_element:
# The spec is unclear what to raise here; SyntaxErr
# would be the other obvious candidate. Since Xerces raises
# InvalidCharacterErr, and since SyntaxErr is not listed
# for createDocument, that seems to be the better choice.
# XXX: need to check for illegal characters here and in
# createElement.
# DOM Level III clears this up when talking about the return value
# of this function. If namespaceURI, qName and DocType are
# Null the document is returned without a document element
# Otherwise if doctype or namespaceURI are not None
# Then we go back to the above problem
raise xml.dom.InvalidCharacterErr("Element with no name")
if add_root_element:
prefix, localname = _nssplit(qualifiedName)
if prefix == "xml" \
and namespaceURI != "http://www.w3.org/XML/1998/namespace":
raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
if prefix and not namespaceURI:
raise xml.dom.NamespaceErr(
"illegal use of prefix without namespaces")
element = doc.createElementNS(namespaceURI, qualifiedName)
if doctype:
doc.appendChild(doctype)
doc.appendChild(element)
if doctype:
doctype.parentNode = doctype.ownerDocument = doc
doc.doctype = doctype
doc.implementation = self
return doc
def createDocumentType(self, qualifiedName, publicId, systemId):
doctype = DocumentType(qualifiedName)
doctype.publicId = publicId
doctype.systemId = systemId
return doctype
# DOM Level 3 (WD 9 April 2002)
def getInterface(self, feature):
if self.hasFeature(feature, None):
return self
else:
return None
# internal
def _create_document(self):
return Document()
class ElementInfo(object):
"""Object that represents content-model information for an element.
This implementation is not expected to be used in practice; DOM
builders should provide implementations which do the right thing
using information available to it.
"""
__slots__ = 'tagName',
def __init__(self, name):
self.tagName = name
def getAttributeType(self, aname):
return _no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return _no_type
def isElementContent(self):
return False
def isEmpty(self):
"""Returns true iff this element is declared to have an EMPTY
content model."""
return False
def isId(self, aname):
"""Returns true iff the named attribute is a DTD-style ID."""
return False
def isIdNS(self, namespaceURI, localName):
"""Returns true iff the identified attribute is a DTD-style ID."""
return False
def __getstate__(self):
return self.tagName
def __setstate__(self, state):
self.tagName = state
def _clear_id_cache(node):
if node.nodeType == Node.DOCUMENT_NODE:
node._id_cache.clear()
node._id_search_stack = None
elif _in_document(node):
node.ownerDocument._id_cache.clear()
node.ownerDocument._id_search_stack= None
class Document(Node, DocumentLS):
__slots__ = ('_elem_info', 'doctype',
'_id_search_stack', 'childNodes', '_id_cache')
_child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
implementation = DOMImplementation()
nodeType = Node.DOCUMENT_NODE
nodeName = "#document"
nodeValue = None
attributes = None
parentNode = None
previousSibling = nextSibling = None
# Document attributes from Level 3 (WD 9 April 2002)
actualEncoding = None
encoding = None
standalone = None
version = None
strictErrorChecking = False
errorHandler = None
documentURI = None
_magic_id_count = 0
def __init__(self):
self.doctype = None
self.childNodes = NodeList()
# mapping of (namespaceURI, localName) -> ElementInfo
# and tagName -> ElementInfo
self._elem_info = {}
self._id_cache = {}
self._id_search_stack = None
def _get_elem_info(self, element):
if element.namespaceURI:
key = element.namespaceURI, element.localName
else:
key = element.tagName
return self._elem_info.get(key)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_doctype(self):
return self.doctype
def _get_documentURI(self):
return self.documentURI
def _get_encoding(self):
return self.encoding
def _get_errorHandler(self):
return self.errorHandler
def _get_standalone(self):
return self.standalone
def _get_strictErrorChecking(self):
return self.strictErrorChecking
def _get_version(self):
return self.version
def appendChild(self, node):
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
if node.parentNode is not None:
# This needs to be done before the next test since this
# may *be* the document element, in which case it should
# end up re-ordered to the end.
node.parentNode.removeChild(node)
if node.nodeType == Node.ELEMENT_NODE \
and self._get_documentElement():
raise xml.dom.HierarchyRequestErr(
"two document elements disallowed")
return Node.appendChild(self, node)
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
oldChild.nextSibling = oldChild.previousSibling = None
oldChild.parentNode = None
if self.documentElement is oldChild:
self.documentElement = None
return oldChild
def _get_documentElement(self):
for node in self.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
return node
def unlink(self):
if self.doctype is not None:
self.doctype.unlink()
self.doctype = None
Node.unlink(self)
def cloneNode(self, deep):
if not deep:
return None
clone = self.implementation.createDocument(None, None, None)
clone.encoding = self.encoding
clone.standalone = self.standalone
clone.version = self.version
for n in self.childNodes:
childclone = _clone_node(n, deep, clone)
assert childclone.ownerDocument.isSameNode(clone)
clone.childNodes.append(childclone)
if childclone.nodeType == Node.DOCUMENT_NODE:
assert clone.documentElement is None
elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
assert clone.doctype is None
clone.doctype = childclone
childclone.parentNode = clone
self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
self, clone)
return clone
def createDocumentFragment(self):
d = DocumentFragment()
d.ownerDocument = self
return d
def createElement(self, tagName):
e = Element(tagName)
e.ownerDocument = self
return e
def createTextNode(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
t = Text()
t.data = data
t.ownerDocument = self
return t
def createCDATASection(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
c = CDATASection()
c.data = data
c.ownerDocument = self
return c
def createComment(self, data):
c = Comment(data)
c.ownerDocument = self
return c
def createProcessingInstruction(self, target, data):
p = ProcessingInstruction(target, data)
p.ownerDocument = self
return p
def createAttribute(self, qName):
a = Attr(qName)
a.ownerDocument = self
a.value = ""
return a
def createElementNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
e = Element(qualifiedName, namespaceURI, prefix)
e.ownerDocument = self
return e
def createAttributeNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
a = Attr(qualifiedName, namespaceURI, localName, prefix)
a.ownerDocument = self
a.value = ""
return a
# A couple of implementation-specific helpers to create node types
# not supported by the W3C DOM specs:
def _create_entity(self, name, publicId, systemId, notationName):
e = Entity(name, publicId, systemId, notationName)
e.ownerDocument = self
return e
def _create_notation(self, name, publicId, systemId):
n = Notation(name, publicId, systemId)
n.ownerDocument = self
return n
def getElementById(self, id):
if id in self._id_cache:
return self._id_cache[id]
if not (self._elem_info or self._magic_id_count):
return None
stack = self._id_search_stack
if stack is None:
# we never searched before, or the cache has been cleared
stack = [self.documentElement]
self._id_search_stack = stack
elif not stack:
# Previous search was completed and cache is still valid;
# no matching node.
return None
result = None
while stack:
node = stack.pop()
# add child elements to stack for continued searching
stack.extend([child for child in node.childNodes
if child.nodeType in _nodeTypes_with_children])
# check this node
info = self._get_elem_info(node)
if info:
# We have to process all ID attributes before
# returning in order to get all the attributes set to
# be IDs using Element.setIdAttribute*().
for attr in node.attributes.values():
if attr.namespaceURI:
if info.isIdNS(attr.namespaceURI, attr.localName):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif info.isId(attr.name):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif node._magic_id_nodes == 1:
break
elif node._magic_id_nodes:
for attr in node.attributes.values():
if attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
if result is not None:
break
return result
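    # Illustrative sketch (not part of the original source): getElementById() only
    # sees attributes known to be IDs (declared in a DTD or registered explicitly).
    # For an element already attached to the document tree:
    #
    #   elem.setAttribute('id', 'n1')
    #   elem.setIdAttribute('id')          # mark the attribute as a DOM ID
    #   doc.getElementById('n1')           # -> elem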
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def isSupported(self, feature, version):
return self.implementation.hasFeature(feature, version)
def importNode(self, node, deep):
if node.nodeType == Node.DOCUMENT_NODE:
raise xml.dom.NotSupportedErr("cannot import document nodes")
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise xml.dom.NotSupportedErr("cannot import document type nodes")
return _clone_node(node, deep, self)
def writexml(self, writer, indent="", addindent="", newl="", encoding=None):
if encoding is None:
writer.write('<?xml version="1.0" ?>'+newl)
else:
writer.write('<?xml version="1.0" encoding="%s"?>%s' % (
encoding, newl))
for node in self.childNodes:
node.writexml(writer, indent, addindent, newl)
# DOM Level 3 (WD 9 April 2002)
def renameNode(self, n, namespaceURI, name):
if n.ownerDocument is not self:
raise xml.dom.WrongDocumentErr(
"cannot rename nodes from other documents;\n"
"expected %s,\nfound %s" % (self, n.ownerDocument))
if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
raise xml.dom.NotSupportedErr(
"renameNode() only applies to element and attribute nodes")
if namespaceURI != EMPTY_NAMESPACE:
if ':' in name:
prefix, localName = name.split(':', 1)
if ( prefix == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE):
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix")
else:
if ( name == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE
and n.nodeType == Node.ATTRIBUTE_NODE):
raise xml.dom.NamespaceErr(
"illegal use of the 'xmlns' attribute")
prefix = None
localName = name
else:
prefix = None
localName = None
if n.nodeType == Node.ATTRIBUTE_NODE:
element = n.ownerElement
if element is not None:
is_id = n._is_id
element.removeAttributeNode(n)
else:
element = None
n.prefix = prefix
n._localName = localName
n.namespaceURI = namespaceURI
n.nodeName = name
if n.nodeType == Node.ELEMENT_NODE:
n.tagName = name
else:
# attribute node
n.name = name
if element is not None:
element.setAttributeNode(n)
if is_id:
element.setIdAttributeNode(n)
# It's not clear from a semantic perspective whether we should
# call the user data handlers for the NODE_RENAMED event since
# we're re-using the existing node. The draft spec has been
# interpreted as meaning "no, don't call the handler unless a
# new node is created."
return n
defproperty(Document, "documentElement",
doc="Top-level element of this document.")
def _clone_node(node, deep, newOwnerDocument):
"""
Clone a node and give it the new owner document.
Called by Node.cloneNode and Document.importNode
"""
if node.ownerDocument.isSameNode(newOwnerDocument):
operation = xml.dom.UserDataHandler.NODE_CLONED
else:
operation = xml.dom.UserDataHandler.NODE_IMPORTED
if node.nodeType == Node.ELEMENT_NODE:
clone = newOwnerDocument.createElementNS(node.namespaceURI,
node.nodeName)
for attr in node.attributes.values():
clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
a.specified = attr.specified
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
clone = newOwnerDocument.createDocumentFragment()
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.TEXT_NODE:
clone = newOwnerDocument.createTextNode(node.data)
elif node.nodeType == Node.CDATA_SECTION_NODE:
clone = newOwnerDocument.createCDATASection(node.data)
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
clone = newOwnerDocument.createProcessingInstruction(node.target,
node.data)
elif node.nodeType == Node.COMMENT_NODE:
clone = newOwnerDocument.createComment(node.data)
elif node.nodeType == Node.ATTRIBUTE_NODE:
clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
node.nodeName)
clone.specified = True
clone.value = node.value
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
assert node.ownerDocument is not newOwnerDocument
operation = xml.dom.UserDataHandler.NODE_IMPORTED
clone = newOwnerDocument.implementation.createDocumentType(
node.name, node.publicId, node.systemId)
clone.ownerDocument = newOwnerDocument
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in node.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
notation.ownerDocument = newOwnerDocument
clone.notations._seq.append(notation)
if hasattr(n, '_call_user_data_handler'):
n._call_user_data_handler(operation, n, notation)
for e in node.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
entity.ownerDocument = newOwnerDocument
clone.entities._seq.append(entity)
if hasattr(e, '_call_user_data_handler'):
e._call_user_data_handler(operation, n, entity)
else:
# Note the cloning of Document and DocumentType nodes is
# implementation specific. minidom handles those cases
# directly in the cloneNode() methods.
raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
# Check for _call_user_data_handler() since this could conceivably
# used with other DOM implementations (one of the FourThought
# DOMs, perhaps?).
if hasattr(node, '_call_user_data_handler'):
node._call_user_data_handler(operation, node, clone)
return clone
def _nssplit(qualifiedName):
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
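# Illustrative sketch (not part of the original source):
#   _nssplit('xsl:template')  ->  ('xsl', 'template')
#   _nssplit('template')      ->  (None, 'template')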
def _do_pulldom_parse(func, args, kwargs):
events = func(*args, **kwargs)
toktype, rootNode = events.getEvent()
events.expandNode(rootNode)
events.clear()
return rootNode
def parse(file, parser=None, bufsize=None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
from xml.dom import expatbuilder
return expatbuilder.parse(file)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parse, (file,),
{'parser': parser, 'bufsize': bufsize})
def parseString(string, parser=None):
"""Parse a file into a DOM from a string."""
if parser is None:
from xml.dom import expatbuilder
return expatbuilder.parseString(string)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parseString, (string,),
{'parser': parser})
def getDOMImplementation(features=None):
if features:
if isinstance(features, str):
features = domreg._parse_feature_string(features)
for f, v in features:
if not Document.implementation.hasFeature(f, v):
return None
return Document.implementation
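# Illustrative usage sketch (not part of the original module):
#
#   from xml.dom import minidom
#   doc = minidom.parseString('<root><item id="1">text</item></root>')
#   item = doc.getElementsByTagName('item')[0]
#   item.getAttribute('id')    # -> '1'
#   item.firstChild.data       # -> 'text'
#   doc.toxml()                # serialize the tree back to an XML string
#   doc.unlink()               # optional: break reference cycles explicitly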
| gpl-3.0 | -4,043,681,963,868,571,000 | 32.662638 | 87 | 0.582164 | false |
cts2/rf2service | server/Server.py | 1 | 3285 | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from rf2db.utils import urlutil
from rf2db.db.RF2DBConnection import cp_values
from rf2db.db.RF2FileCommon import rf2_values
from server.BaseNode import BaseNode, expose, xmlVal, htmlHead
html = htmlHead + """
<html>
<head>
<title>RF2 Server Configuration</title>
</head>
<body>
<h1>Database Configuration</h1>
<table border="1">
<tr>
<td>Host</td>
<td>%(host)s
</tr>
<tr>
<td>Port</td>
<td>%(port)s</td>
</tr>
<tr>
<td>DB</td>
<td>%(db)s</td>
</tr>
<tr>
<td>Charset</td>
<td>%(charset)s</td>
</tr>
</table>
<h1>URL Settings</h1>
<table border="1">
<tr>
<td>Host</td>
<td>%(href_host)s</td>
</tr>
<tr>
<td>Root</td>
<td>%(href_root)s</td>
</tr>
<tr>
<td>Relative URI</td>
<td>%(reluri)s</td>
</tr>
<tr>
<td>Base URI</td>
<td>%(baseuri)s</td>
</tr>
<tr>
<td>Complete URI</td>
<td>%(completeuri)s</td>
</tr>
</table>
</body>
</html>"""
class ServerConf(BaseNode):
namespace = ''
@expose
def default(self, *args, **kwargs):
host = cp_values.host
port = cp_values.port
db = cp_values.db
charset = cp_values.charset
href_host = urlutil.href_settings.host
href_root = urlutil.href_settings.root
reluri = urlutil.relative_uri()
baseuri = urlutil.base_uri()
completeuri = urlutil.complete_uri()
return html % vars()
@expose
def status(self, *args, **kwargs):
return (xmlVal % ('<status>OK</status><rf2_release>%s</rf2_release>' % rf2_values.release),
(0, None))
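# Illustrative note (not part of the original file): BaseNode and @expose suggest a
# CherryPy-style dispatcher, which is an assumption here. Once mounted, a GET on
# /status would return the xmlVal template filled with something like
#   <status>OK</status><rf2_release>20140131</rf2_release>
# where the release value comes from rf2_values.release (20140131 is hypothetical).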
| bsd-3-clause | -5,629,486,767,426,773,000 | 28.863636 | 99 | 0.64414 | false |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/numpy/random/setupscons.py | 100 | 1384 | import glob
from os.path import join, split
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random',parent_package,top_path)
source_files = [join('mtrand', i) for i in ['mtrand.c',
'mtrand.pyx',
'numpy.pxi',
'randomkit.c',
'randomkit.h',
'Python.pxi',
'initarray.c',
'initarray.h',
'distributions.c',
'distributions.h',
]]
config.add_sconscript('SConstruct', source_files = source_files)
config.add_data_files(('.', join('mtrand', 'randomkit.h')))
config.add_data_dir('tests')
return config
def testcode_wincrypt():
return """\
/* check to see if _WIN32 is defined */
int main(int argc, char *argv[])
{
#ifdef _WIN32
return 0;
#else
return 1;
#endif
}
"""
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| gpl-3.0 | 2,377,368,257,002,301,400 | 33.6 | 69 | 0.438584 | false |
rodrigob/fuel | fuel/datasets/iris.py | 6 | 1038 | from fuel.datasets import H5PYDataset
from fuel.utils import find_in_data_path
class Iris(H5PYDataset):
u"""Iris dataset.
Iris [LBBH] is a simple pattern recognition dataset, which consist of
3 classes of 50 examples each having 4 real-valued features each, where
each class refers to a type of iris plant. It is accessible through the
UCI Machine Learning repository [UCI].
.. [IRIS] Ronald A. Fisher, *The use of multiple measurements in
taxonomic problems*, Annual Eugenics, 7, Part II, 179-188,
September 1936.
.. [UCI] https://archive.ics.uci.edu/ml/datasets/Iris
Parameters
----------
which_sets : tuple of str
Which split to load. Valid value is 'all'
corresponding to 150 examples.
"""
filename = 'iris.hdf5'
def __init__(self, which_sets, **kwargs):
kwargs.setdefault('load_in_memory', True)
super(Iris, self).__init__(
file_or_path=find_in_data_path(self.filename),
which_sets=which_sets, **kwargs)
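# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# Based only on the docstring above: 'all' is the only split and it holds 150
# examples. The open/get_data/close calls and the request slice are assumptions
# about the surrounding Fuel H5PYDataset API, not taken from this file.
def _iris_usage_example():
    dataset = Iris(('all',))
    state = dataset.open()
    data = dataset.get_data(state, slice(0, 10))  # first 10 examples
    dataset.close(state)
    return data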
| mit | -3,161,591,163,736,271,400 | 32.483871 | 75 | 0.654143 | false |
vipints/oqtans | oqtans_tools/KIRMES/0.8/src/kmotif.py | 2 | 10908 | """
#######################################################################################
# #
# kmotif.py is a command-line front-end to the KIRMES pipeline #
# BibTeX entries below. Please cite: #
# Sebastian J. Schulheiss, Wolfgang Busch, Jan U. Lohmann, Oliver Kohlbacher, #
#   Sebastian J. Schultheiss, Wolfgang Busch, Jan U. Lohmann, Oliver Kohlbacher,      #
# modules in euchromatic sequences. Bioinformatics 16(25):2126-33. #
# #
#   Copyright (C) 2007-2010 Sebastian J. Schultheiss <[email protected]>          #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, see http://www.gnu.org/licenses #
# or write to the Free Software Foundation, Inc., 51 Franklin Street, #
# Fifth Floor, Boston, MA 02110-1301 USA #
# #
#######################################################################################
# #
# Original Author: Sebastian J. Schultheiss, version 0.8.0 #
# Please add a notice of any modifications here: #
# #
# #
#######################################################################################
"""
__version__ = "0.8.0"
__license__ = "GNU General Public License"
# bibtex entry
__author__ = """
@article{Schultheiss2009KIRMES,
author = {Sebastian J. Schultheiss and Wolfgang Busch and Jan U. Lohmann and Oliver Kohlbacher and Gunnar Raetsch},
title = {{KIRMES}: Kernel-based identification of regulatory modules in euchromatic sequences},
year = {2009},
journal = {Bioinformatics},
publisher = {Oxford Journals},
volume = {25},
issue = {16},
pages = {2126--33},
month = {April},
doi = {10.1093/bioinformatics/btp278},
abstract = {Motivation: Understanding transcriptional regulation is one of the
main challenges in computational biology. An important problem is
the identification of transcription factor binding sites in promoter
regions of potential transcription factor target genes. It is
typically approached by position weight matrix-based motif
identification algorithms using Gibbs sampling, or heuristics to
extend seed oligos. Such algorithms succeed in identifying single,
relatively well-conserved binding sites, but tend to fail when it
comes to the identification of combinations of several degenerate
binding sites, as those often found in cis-regulatory modules.
Results: We propose a new algorithm that combines the benefits of
existing motif finding with the ones of Support Vector Machines (SVMs)
to find degenerate motifs in order to improve the modeling of
regulatory modules. In experiments on microarray data from Arabidopsis
thaliana, we were able to show that the newly developed strategy
significantly improves the recognition of transcription factor targets.
Availability: The PYTHON source code (open source-licensed under GPL),
the data for the experiments and a Galaxy-based web service are
available at http://www.fml.mpg.de/raetsch/projects/kirmes.
Contact: [email protected]},
URL = {http://bioinformatics.oxfordjournals.org/cgi/content/abstract/btp278v1}
}
"""
__usage__ = """Usage:
To find motifs in 2 FASTA files, supply positives and negatives:
%prog -t -p positives.fasta -n negatives.fasta [options]
"""
# system imports
import os
from optparse import OptionParser, OptionValueError
import sys
from shutil import copy
try:
# own imports
import kirmes_ini
from Inclusive import Inclusive
from Kmers import Kmers, createIMMFile
except ImportError:
print "ImportError:"
print "One of the required imports failed, please make sure the files "
print "kirmes_ini.py or KIRMES_INI.pyc, "
print "DBTools.py or DBTools.pyc and FileTools.py or FileTools.pyc"
print "are present in the current directory, or download this program again."
raise
def check_file(option, opt_str, value, _parser):
"""See if a file exists on the file system, raises an OptionValueError"""
if value == "None":
value = None
elif not os.path.isfile(value):
raise OptionValueError("Cannot open %s as a file. Please check if it exists." % value)
setattr(_parser.values, option.dest, value)
def check_pickle(option, opt_str, value, _parser):
"""Check for the kernel file in testing mode"""
if not _parser.values.train:
check_file(option, opt_str, value, _parser)
else:
setattr(_parser.values, option.dest, value)
def optionparse(parser):
"""Completes the option parser object, adding defaults and options"""
parser.add_option("-t", "--type", type = "string", dest = "type",
help = "motif finding strategy to use: MotifSampler (kims), PRIORITY (krgp), k-mer (kkmc), or just the locator, must supply a valid imm file (kiml) [default %default]")
parser.add_option("-p", "--positives", dest = "positives",
action = "callback", callback = check_file, type = "string",
help="path to the fasta file with a positive set of regulatory regions [default %default]")
parser.add_option("-n", "--negatives", dest = "negatives", type = "string",
action = "callback", callback = check_file,
help="path to the fasta file with a negative set of regulatory regions [default %default]")
parser.add_option("-i", "--pgff", dest = "pgff", type = "string",
help="path to the output gff file of motif positions from the positive regulatory regions [default %default]")
parser.add_option("-j", "--ngff", dest = "ngff", type = "string",
help="path to the output gff file of motif positions from the negative regulatory regions [default %default]")
parser.add_option("-x", "--matrix", dest = "imm", type = "string",
help="path to the input or output imm file of motif motif models as position weight matrices [default %default]")
parser.add_option("-m", "--motifs", type = "int", dest = "nof_motifs",
help = "number of motifs to consider [default %default]")
parser.add_option("-l", "--length", type = "int", dest = "motif_length",
help = "length of the motifs to search for [default %default]")
parser.set_defaults(positives = kirmes_ini.POSITIVES_FILENAME,
negatives = kirmes_ini.NEGATIVES_FILENAME,
nof_motifs = kirmes_ini.NOF_MOTIFS,
motif_length = kirmes_ini.MOTIF_LENGTH,
type = kirmes_ini.SAMPLING_STRATEGY,
ngff = kirmes_ini.NGFF_FILENAME,
pgff = kirmes_ini.PGFF_FILENAME,
imm = kirmes_ini.IMM_FILENAME)
def motifScan(fastafile, matrixfile, gfffile):
"""Search for motifs with existing matrix defintion"""
ive = Inclusive()
ive.fastafilename = fastafile
ive.immfilename = matrixfile
gff = ive.getMotifGff()
copy(gff, gfffile)
def kims(options):
"""Run the MotifSampler Program"""
ive = Inclusive()
ive.fastafilename = options.positives
ive.settings.setMotifLength(options.motif_length)
ive.settings.setNofMotifs(options.nof_motifs)
pgff = ive.getMotifGff()
copy(pgff, options.pgff)
#second round, find motifs in negative sequences
ive.fastafilename = options.negatives
ngff = ive.getMotifGff()
copy(ngff, options.ngff)
imm = ive.getMatrixFile()
copy(imm, options.imm)
def krgp(options):
"""Run the Priority Program (falls back to Inclusive)"""
kims(options)
def kkmc(options):
"""Run the kmer counting strategy, search for motifs with KIML"""
mer = Kmers()
mer.setFastaFile(options.positives)
mer.settings.setMotifLength(options.motif_length)
pkmerdict = mer.countKmers(options.nof_motifs)
createIMMFile(options.imm, pkmerdict.keys())
motifScan(options.positives, options.imm, options.pgff)
motifScan(options.negatives, options.imm, options.ngff)
def kiml(options):
"""Search for motifs with existing matrix defintion"""
motifScan(options.positives, options.imm, options.pgff)
if options.negatives:
motifScan(options.negatives, options.imm, options.ngff)
#######################
# main #
#######################
def main(argv = None):
"""main() block"""
if argv is None:
argv = sys.argv
parser = OptionParser(version = "%prog " + __version__, usage = __usage__)
optionparse(parser)
(options, args) = parser.parse_args()
if options.type == "krgp":
krgp(options)
elif options.type == "kkmc":
kkmc(options)
elif options.type == "kiml":
kiml(options)
else:
kims(options)
if __name__ == "__main__":
main()
| bsd-3-clause | 6,343,213,143,473,994,000 | 49.734884 | 194 | 0.556197 | false |
f-prettyland/angr | angr/block.py | 4 | 7002 | import logging
l = logging.getLogger("angr.block")
import pyvex
from archinfo import ArchARM
from .engines import SimEngineVEX
DEFAULT_VEX_ENGINE = SimEngineVEX() # this is only used when Block is not initialized with a project
class Block(object):
BLOCK_MAX_SIZE = 4096
__slots__ = ['_project', '_bytes', '_vex', 'thumb', '_capstone', 'addr', 'size', 'arch', 'instructions',
'_instruction_addrs', '_opt_level'
]
def __init__(self, addr, project=None, arch=None, size=None, byte_string=None, vex=None, thumb=False, backup_state=None,
opt_level=None, num_inst=None, traceflags=0):
# set up arch
if project is not None:
self.arch = project.arch
else:
self.arch = arch
if self.arch is None:
raise ValueError('Either "project" or "arch" has to be specified.')
if isinstance(self.arch, ArchARM):
if addr & 1 == 1:
thumb = True
if thumb:
addr |= 1
else:
thumb = False
self._project = project
self.thumb = thumb
self.addr = addr
self._opt_level = opt_level
if self._project is None and byte_string is None:
raise ValueError('"byte_string" has to be specified if "project" is not provided.')
if size is None:
if byte_string is not None:
size = len(byte_string)
elif vex is not None:
size = vex.size
else:
vex = self._vex_engine.lift(
clemory=project.loader.memory,
state=backup_state,
insn_bytes=byte_string,
addr=addr,
thumb=thumb,
opt_level=opt_level,
num_inst=num_inst,
traceflags=traceflags)
size = vex.size
self._vex = vex
self._capstone = None
self.size = size
self.instructions = num_inst
self._instruction_addrs = []
self._parse_vex_info()
if byte_string is None:
if backup_state is not None:
self._bytes = self._vex_engine._load_bytes(addr - thumb, size, state=backup_state)[0]
if type(self._bytes) is not str:
self._bytes = str(pyvex.ffi.buffer(self._bytes, size))
else:
self._bytes = None
elif type(byte_string) is str:
if self.size is not None:
self._bytes = byte_string[:self.size]
else:
self._bytes = byte_string
else:
# Convert bytestring to a str
# size will ALWAYS be known at this point
self._bytes = str(pyvex.ffi.buffer(byte_string, self.size))
def _parse_vex_info(self):
vex = self._vex
if vex is not None:
self.instructions = vex.instructions
self._instruction_addrs = []
self.size = vex.size
for stmt in vex.statements:
if stmt.tag != 'Ist_IMark':
continue
if self.addr is None:
self.addr = stmt.addr + stmt.delta
self._instruction_addrs.append(stmt.addr + stmt.delta)
def __repr__(self):
return '<Block for %#x, %d bytes>' % (self.addr, self.size)
def __getstate__(self):
return dict((k, getattr(self, k)) for k in self.__slots__ if k not in ('_capstone', ))
def __setstate__(self, data):
for k, v in data.iteritems():
setattr(self, k, v)
def __hash__(self):
return hash((type(self), self.addr, self.bytes))
def __eq__(self, other):
return type(self) is type(other) and \
self.addr == other.addr and \
self.bytes == other.bytes
def __ne__(self, other):
return not self == other
def pp(self):
return self.capstone.pp()
@property
def _vex_engine(self):
if self._project is None:
return DEFAULT_VEX_ENGINE
else:
return self._project.factory.default_engine
@property
def vex(self):
if not self._vex:
self._vex = self._vex_engine.lift(
clemory=self._project.loader.memory if self._project is not None else None,
insn_bytes=self._bytes,
addr=self.addr,
thumb=self.thumb,
size=self.size,
num_inst=self.instructions,
opt_level=self._opt_level,
arch=self.arch,
)
self._parse_vex_info()
return self._vex
@property
def capstone(self):
if self._capstone: return self._capstone
cs = self.arch.capstone if not self.thumb else self.arch.capstone_thumb
insns = []
for cs_insn in cs.disasm(self.bytes, self.addr):
insns.append(CapstoneInsn(cs_insn))
block = CapstoneBlock(self.addr, insns, self.thumb, self.arch)
self._capstone = block
return block
@property
def codenode(self):
return BlockNode(self.addr, self.size, bytestr=self.bytes, thumb=self.thumb)
@property
def bytes(self):
if self._bytes is None:
addr = self.addr
if self.thumb:
addr = (addr >> 1) << 1
self._bytes = ''.join(self._project.loader.memory.read_bytes(addr, self.size))
return self._bytes
@property
def instruction_addrs(self):
if not self._instruction_addrs and self._vex is None:
# initialize instruction addrs
_ = self.vex
return self._instruction_addrs
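# --- Illustrative sketch (editorial addition, not part of the original module) ---
# Constructing a Block directly from raw bytes, without a Project, exercises the
# arch/byte_string branch of __init__ above. The architecture and the two bytes
# below ("nop; ret" on x86-64) are assumptions made purely for demonstration.
def _block_from_bytes_example():
    import archinfo
    blk = Block(0x400000, arch=archinfo.ArchAMD64(), byte_string=b'\x90\xc3')
    return blk.vex, blk.capstone  # lifted VEX IR and disassembly wrappers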
class CapstoneBlock(object):
"""
Deep copy of the capstone blocks, which have serious issues with having extended lifespans
outside of capstone itself
"""
__slots__ = [ 'addr', 'insns', 'thumb', 'arch' ]
def __init__(self, addr, insns, thumb, arch):
self.addr = addr
self.insns = insns
self.thumb = thumb
self.arch = arch
def pp(self):
print str(self)
def __str__(self):
return '\n'.join(map(str, self.insns))
def __repr__(self):
return '<CapstoneBlock for %#x>' % self.addr
class CapstoneInsn(object):
def __init__(self, capstone_insn):
self.insn = capstone_insn
def __getattr__(self, item):
if item in ('__str__', '__repr__'):
return self.__getattribute__(item)
if hasattr(self.insn, item):
return getattr(self.insn, item)
raise AttributeError()
def __str__(self):
return "%#x:\t%s\t%s" % (self.address, self.mnemonic, self.op_str)
def __repr__(self):
return '<CapstoneInsn "%s" for %#x>' % (self.mnemonic, self.address)
from .codenode import BlockNode
| bsd-2-clause | -2,851,778,779,293,475,300 | 29.710526 | 124 | 0.531134 | false |
blazek/QGIS | tests/src/python/test_qgsserver_apicontext.py | 25 | 1835 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServerApiContext class.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '11/07/2019'
__copyright__ = 'Copyright 2019, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import json
import re
# Deterministic XML
os.environ['QT_HASH_SEED'] = '1'
from qgis.server import (
QgsBufferServerRequest,
QgsBufferServerResponse,
QgsServerApiContext
)
from qgis.testing import unittest
from utilities import unitTestDataPath
from urllib import parse
import tempfile
from test_qgsserver import QgsServerTestBase
class QgsServerApiContextsTest(QgsServerTestBase):
""" QGIS Server API context tests"""
def testMatchedPath(self):
"""Test path extraction"""
response = QgsBufferServerResponse()
request = QgsBufferServerRequest("http://www.qgis.org/services/wfs3")
context = QgsServerApiContext("/wfs3", request, response, None, None)
self.assertEqual(context.matchedPath(), "/services/wfs3")
request = QgsBufferServerRequest("http://www.qgis.org/services/wfs3/collections.hml")
context = QgsServerApiContext("/wfs3", request, response, None, None)
self.assertEqual(context.matchedPath(), "/services/wfs3")
request = QgsBufferServerRequest("http://www.qgis.org/services/wfs3/collections.hml")
context = QgsServerApiContext("/wfs4", request, response, None, None)
self.assertEqual(context.matchedPath(), "")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 7,564,155,027,994,930,000 | 30.637931 | 93 | 0.712262 | false |
stopthatcow/zazu | zazu/style.py | 1 | 6180 | # -*- coding: utf-8 -*-
"""Style functions for zazu."""
import zazu.imports
zazu.imports.lazy_import(locals(), [
'click',
'difflib',
'functools',
'os',
'threading',
'sys',
'zazu.config',
'zazu.git_helper',
'zazu.styler',
'zazu.util'
])
__author__ = 'Nicholas Wiles'
__copyright__ = 'Copyright 2016'
default_exclude_paths = ['build',
'dependency',
'dependencies']
def read_file(path):
"""Read a file and return its contents as a string."""
with open(path, 'r') as f:
return f.read()
def write_file(path, _, styled_string):
"""Write styled_string string to a file."""
with open(path, 'w') as f:
return f.write(styled_string)
"""The git binary doesn't allow concurrent access, so serailize calls to it using a lock."""
git_lock = threading.Lock()
def stage_patch(path, input_string, styled_string):
"""Create a patch between input_string and output_string and add the patch to the git staging area.
Args:
path: the path of the file being patched.
input_string: the current state of the file in the git stage.
styled_string: the properly styled string to stage.
"""
# If the input was the same as the current file contents, apply the styling locally and add it.
if read_file(path) == input_string:
write_file(path, '', styled_string)
with git_lock:
zazu.util.check_output(['git', 'add', path])
else:
# The file is partially staged. We must apply a patch to the staging area.
input_lines = input_string.splitlines()
styled_lines = styled_string.splitlines()
patch = difflib.unified_diff(input_lines, styled_lines, 'a/' + path, 'b/' + path, lineterm='')
patch_string = '\n'.join(patch) + '\n'
if input_string[-1] != '\n':
# This is to address a bizarre issue with git apply whereby if the staged file doesn't end in a newline,
# the patch will fail to apply.
raise click.ClickException('File "{}" must have a trailing newline'.format(path))
with git_lock:
zazu.util.check_popen(args=['git', 'apply', '--cached', '--verbose', '-'], stdin_str=patch_string,
universal_newlines=True)
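# --- Illustrative sketch (editorial addition, not part of the original module) ---
# What the unified diff handed to `git apply --cached` by stage_patch looks like
# for a one-line change; file names and contents here are invented. difflib is
# available through the lazy_import call at the top of this module.
def _unified_diff_example():
    before = ['x = 1', 'print(x)']
    after = ['x = 2', 'print(x)']
    patch = difflib.unified_diff(before, after, 'a/demo.py', 'b/demo.py', lineterm='')
    return '\n'.join(patch) + '\n'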
def style_file(stylers, path, read_fn, write_fn):
"""Style a file.
Args:
styler: the styler to use to style the file.
path: the file path.
read_fn: function used to read in the file contents.
write_fn: function used to write out the styled file, or None
"""
input_string = read_fn(path)
styled_string = input_string
for styler in stylers:
styled_string = styler.style_string(styled_string, path)
violation = styled_string != input_string
if violation and callable(write_fn):
write_fn(path, input_string, styled_string)
return path, stylers, violation
def styler_list(file, sets, keys):
"""Get the list of stylers to apply to a file based on the file set of each styler."""
return [s for s in keys if file in sets[s]]
@click.command()
@zazu.config.pass_config
@click.option('-v', '--verbose', is_flag=True, help='print files that are dirty')
@click.option('--check', is_flag=True, help='only check the repo for style violations, do not correct them')
@click.option('--cached', is_flag=True, help='only examine/fix files that are staged for SCM commit')
def style(config, verbose, check, cached):
"""Style repo files or check that they are valid style."""
config.check_repo()
violation_count = 0
stylers = config.stylers()
fixed_ok_tags = [click.style('FIXED', fg='red', bold=True), click.style(' OK ', fg='green', bold=True)]
tags = zazu.util.FAIL_OK if check else fixed_ok_tags
with zazu.util.cd(config.repo_root):
if stylers:
if cached:
staged_files = zazu.git_helper.get_touched_files(config.repo)
read_fn = zazu.git_helper.read_staged
write_fn = stage_patch
else:
read_fn = read_file
write_fn = write_file
if check:
write_fn = None
# Determine files for each styler.
file_sets = {}
styler_file_sets = {}
all_files = set()
for s in stylers:
includes = tuple(s.includes)
excludes = tuple(s.excludes)
if (includes, excludes) not in file_sets:
files = set(zazu.util.scantree(config.repo_root,
includes,
excludes,
exclude_hidden=True))
if cached:
files = files.intersection(staged_files)
file_sets[(includes, excludes)] = files
else:
files = file_sets[(includes, excludes)]
styler_file_sets[s] = files
all_files |= files
work = [functools.partial(style_file, styler_list(f, styler_file_sets, stylers), f, read_fn, write_fn) for f in all_files]
checked_files = zazu.util.dispatch(work)
for f, stylers, violation in checked_files:
if verbose:
click.echo(zazu.util.format_checklist_item(not violation,
text='({}) {}'.format(', '.join([s.name() for s in stylers]), f),
tag_formats=tags))
violation_count += violation
if verbose:
file_count = len(all_files)
if check:
click.echo('{} files with violations in {} files'.format(violation_count, file_count))
else:
click.echo('{} files fixed in {} files'.format(violation_count, file_count))
sys.exit(-1 if check and violation_count else 0)
else:
click.echo('no style settings found')
| mit | -1,057,187,777,035,833,600 | 38.870968 | 134 | 0.559871 | false |
Viktor-Evst/fixed-luigi | test/snakebite_test.py | 25 | 3738 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import posixpath
import time
import unittest
import luigi.target
from luigi import six
from nose.plugins.attrib import attr
if six.PY3:
raise unittest.SkipTest("snakebite doesn't work on Python 3 yet.")
try:
from luigi.contrib.hdfs import SnakebiteHdfsClient
from minicluster import MiniClusterTestCase
except ImportError:
raise unittest.SkipTest('Snakebite not installed')
@attr('minicluster')
class TestSnakebiteClient(MiniClusterTestCase):
"""This test requires a snakebite -- it finds it from your
luigi.cfg"""
snakebite = None
def get_client(self):
return SnakebiteHdfsClient()
def setUp(self):
""" We override setUp because we want to also use snakebite for
creating the testing directory. """
self.testDir = "/tmp/luigi-test-{0}-{1}".format(
os.environ["USER"],
time.mktime(datetime.datetime.now().timetuple())
)
self.snakebite = self.get_client()
self.assertTrue(self.snakebite.mkdir(self.testDir))
def tearDown(self):
if self.snakebite.exists(self.testDir):
self.snakebite.remove(self.testDir, True)
def test_exists(self):
self.assertTrue(self.snakebite.exists(self.testDir))
def test_rename(self):
foo = posixpath.join(self.testDir, "foo")
bar = posixpath.join(self.testDir, "bar")
self.assertTrue(self.snakebite.mkdir(foo))
self.assertTrue(self.snakebite.rename(foo, bar))
self.assertTrue(self.snakebite.exists(bar))
def test_rename_trailing_slash(self):
foo = posixpath.join(self.testDir, "foo")
bar = posixpath.join(self.testDir, "bar/")
self.assertTrue(self.snakebite.mkdir(foo))
self.assertTrue(self.snakebite.rename(foo, bar))
self.assertTrue(self.snakebite.exists(bar))
self.assertFalse(self.snakebite.exists(posixpath.join(bar, 'foo')))
def test_relativepath(self):
rel_test_dir = "." + os.path.split(self.testDir)[1]
try:
self.assertFalse(self.snakebite.exists(rel_test_dir))
self.snakebite.mkdir(rel_test_dir)
self.assertTrue(self.snakebite.exists(rel_test_dir))
finally:
if self.snakebite.exists(rel_test_dir):
self.snakebite.remove(rel_test_dir, True)
def test_rename_dont_move(self):
foo = posixpath.join(self.testDir, "foo")
bar = posixpath.join(self.testDir, "bar")
self.assertTrue(self.snakebite.mkdir(foo))
self.assertTrue(self.snakebite.mkdir(bar))
self.assertTrue(self.snakebite.exists(foo)) # For sanity
self.assertTrue(self.snakebite.exists(bar)) # For sanity
self.assertRaises(luigi.target.FileAlreadyExists,
lambda: self.snakebite.rename_dont_move(foo, bar))
self.assertTrue(self.snakebite.exists(foo))
self.assertTrue(self.snakebite.exists(bar))
self.snakebite.rename_dont_move(foo, foo + '2')
self.assertFalse(self.snakebite.exists(foo))
self.assertTrue(self.snakebite.exists(foo + '2'))
| apache-2.0 | 2,889,323,768,037,426,000 | 34.6 | 76 | 0.67496 | false |
Wikidata/QueryAnalysis | tools/getSparqlStatistic.py | 1 | 3883 | import argparse
import os
import sys
from collections import defaultdict
from pprint import pprint
import config
from postprocess import processdata
from utility import utility
parser = argparse.ArgumentParser(description="Prints out the SPARQL statistic")
parser.add_argument(
"--monthsFolder",
"-m",
default=config.monthsFolder,
type=str,
help="the folder in which the months directory " + "are residing")
parser.add_argument(
"--ignoreLock",
"-i",
help="Ignore locked file and execute" + " anyways",
action="store_true")
parser.add_argument(
"--position",
"-p",
default="default position",
type=str,
help="The position to be displayed before the data.")
parser.add_argument(
"month", type=str, help="the month which we're interested in")
if (len(sys.argv[1:]) == 0):
parser.print_help()
parser.exit()
args = parser.parse_args()
if os.path.isfile(utility.addMissingSlash(args.monthsFolder)
+ utility.addMissingSlash(args.month) + "locked") \
and not args.ignoreLock:
print("ERROR: The month " + str(args.month) +
" is being edited at the moment." +
" Use -i if you want to force the execution of this script.")
sys.exit()
class SparqlStatisticHandler:
statistic = defaultdict(int)
totalCount = 0
def handle(self, sparqlQuery, processed):
if (processed['#Valid'] == 'VALID'):
self.totalCount += 1
usedSparqlFeatures = processed['#UsedSparqlFeatures']
for usedSparqlFeature in usedSparqlFeatures.split(","):
self.statistic[usedSparqlFeature.lstrip()] += 1
def printKeys(self, keys):
result = ""
i = 1
for featureName in keys:
featureCount = self.statistic[featureName]
# result += featureName + ": " + str(featureCount) + "\n"
result += str(featureCount) + "\n"
i += 1
print(result)
def printSparqlTranslation(self):
self.statistic["Select"] = self.statistic["SelectQuery"]
self.statistic["Ask"] = self.statistic["AskQuery"]
self.statistic["Describe"] = self.statistic["DescribeQuery"]
self.statistic["Construct"] = self.statistic["ConstructQuery"]
self.statistic["Order By"] = self.statistic["OrderClause"]
self.statistic["Union"] = self.statistic["UnionGraphPattern"]
self.statistic["Optional"] = self.statistic["OptionalGraphPattern"]
self.statistic["Not Exists"] = self.statistic["NotExistsFunc"]
self.statistic["Minus"] = self.statistic["MinusGraphPattern"]
self.statistic["Exists"] = self.statistic["ExistsFunc"]
self.statistic["Group By"] = self.statistic["GroupClause"]
self.statistic["Having"] = self.statistic["HavingClause"]
self.statistic["Service"] = self.statistic["ServiceGraphPattern"]
self.statistic["And"] = self.statistic["Join"]
self.statistic["Values"] = self.statistic["BindingValue"]
self.statistic["'+"] = self.statistic["+"]
self.statistic["Subquery"] = self.statistic["SubSelect"]
# only print specified columns
toPrintKeys = [
"Select", "Ask", "Describe", "Construct", "Distinct", "Limit",
"Offset", "Order By", "Filter", "And", "Union", "Optional",
"Graph", "Not Exists", "Minus", "Exists", "Count", "Max", "Min",
"Avg", "Sum", "Group By", "Having", "Service", "LangService",
"Sample", "Bind", "GroupConcat", "Reduced", "Values", "'+", "*",
"Subquery"
]
self.printKeys(toPrintKeys)
print(" ")
print(str(self.totalCount))
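# --- Illustrative sketch (editorial addition, not part of the original script) ---
# How handle() tallies features for a single processed row: only rows whose
# '#Valid' field is 'VALID' are counted, and '#UsedSparqlFeatures' is split on
# commas. The sample row is invented; note that `statistic` is a class-level
# defaultdict, so counts accumulate across all instances.
def _handler_example():
    h = SparqlStatisticHandler()
    h.handle(None, {'#Valid': 'VALID',
                    '#UsedSparqlFeatures': 'SelectQuery, Filter, Join'})
    return h.statistic['Filter']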
handler = SparqlStatisticHandler()
processdata.processMonth(
handler, args.month, args.monthsFolder, notifications=False)
print(args.position)
handler.printSparqlTranslation()
| apache-2.0 | -3,312,735,612,141,559,000 | 33.061404 | 79 | 0.63044 | false |
alunarbeach/spark | examples/src/main/python/streaming/recoverable_network_wordcount.py | 80 | 4423 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Counts words in text encoded with UTF8 received from the network every second.
Usage: recoverable_network_wordcount.py <hostname> <port> <checkpoint-directory> <output-file>
<hostname> and <port> describe the TCP server that Spark Streaming would connect to receive
data. <checkpoint-directory> directory to HDFS-compatible file system which checkpoint data
<output-file> file to which the word counts will be appended
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/streaming/recoverable_network_wordcount.py \
localhost 9999 ~/checkpoint/ ~/out`
If the directory ~/checkpoint/ does not exist (e.g. running for the first time), it will create
a new StreamingContext (will print "Creating new context" to the console). Otherwise, if
checkpoint data exists in ~/checkpoint/, then it will create StreamingContext from
the checkpoint data.
"""
from __future__ import print_function
import os
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
# Get or register a Broadcast variable
def getWordBlacklist(sparkContext):
if ('wordBlacklist' not in globals()):
globals()['wordBlacklist'] = sparkContext.broadcast(["a", "b", "c"])
return globals()['wordBlacklist']
# Get or register an Accumulator
def getDroppedWordsCounter(sparkContext):
if ('droppedWordsCounter' not in globals()):
globals()['droppedWordsCounter'] = sparkContext.accumulator(0)
return globals()['droppedWordsCounter']
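# Editorial note: the two helpers above cache the Broadcast variable and the
# Accumulator in globals() so they are created lazily, once per driver process.
# This matters for fault recovery: broadcast variables and accumulators are not
# restored from checkpoint data, so they are re-registered on first use inside
# the foreachRDD callback defined below.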
def createContext(host, port, outputPath):
# If you do not see this printed, that means the StreamingContext has been loaded
# from the new checkpoint
print("Creating new context")
if os.path.exists(outputPath):
os.remove(outputPath)
sc = SparkContext(appName="PythonStreamingRecoverableNetworkWordCount")
ssc = StreamingContext(sc, 1)
# Create a socket stream on target ip:port and count the
# words in input stream of \n delimited text (eg. generated by 'nc')
lines = ssc.socketTextStream(host, port)
words = lines.flatMap(lambda line: line.split(" "))
wordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
def echo(time, rdd):
# Get or register the blacklist Broadcast
blacklist = getWordBlacklist(rdd.context)
# Get or register the droppedWordsCounter Accumulator
droppedWordsCounter = getDroppedWordsCounter(rdd.context)
# Use blacklist to drop words and use droppedWordsCounter to count them
def filterFunc(wordCount):
if wordCount[0] in blacklist.value:
droppedWordsCounter.add(wordCount[1])
                return False
            else:
                return True
counts = "Counts at time %s %s" % (time, rdd.filter(filterFunc).collect())
print(counts)
print("Dropped %d word(s) totally" % droppedWordsCounter.value)
print("Appending to " + os.path.abspath(outputPath))
with open(outputPath, 'a') as f:
f.write(counts + "\n")
wordCounts.foreachRDD(echo)
return ssc
if __name__ == "__main__":
if len(sys.argv) != 5:
print("Usage: recoverable_network_wordcount.py <hostname> <port> "
"<checkpoint-directory> <output-file>", file=sys.stderr)
exit(-1)
host, port, checkpoint, output = sys.argv[1:]
ssc = StreamingContext.getOrCreate(checkpoint,
lambda: createContext(host, int(port), output))
ssc.start()
ssc.awaitTermination()
| apache-2.0 | -5,087,141,895,871,788,000 | 39.577982 | 96 | 0.699977 | false |
Zing22/uemscode | tmp_test.py | 1 | 1634 | # -*- coding=utf-8 -*-
#### for testing steps
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.externals import joblib
from PIL import Image
from process import toBin, cropLetters
from img2feature import toFeature
from main import readAllFiles
TEMP_DIR = 'tmp/'
def test_onePic():
path = input('Pic path:')
img = Image.open(path)
bimg = toBin(img)
bimg.save(TEMP_DIR+ 'bimg.jpg')
success, letters = cropLetters(bimg)
if not success:
print('Crop failed.')
print(letters)
return
features = []
for l in letters:
features.append([int(x) for x in toFeature(l).split(' ')])
l.save(TEMP_DIR + '%d.jpg' % len(features))
pre = clf.predict(features)
code = ''.join([chr(x + ord('A')) for x in pre])
print(code)
def test_tmp_dir():
filenames = readAllFiles(TEMP_DIR)
for file in filenames:
img = Image.open(TEMP_DIR + file)
bimg = toBin(img)
bimg.save(TEMP_DIR + 'tmp_' + file)
success, letters = cropLetters(bimg)
if not success:
print('Crop failed.')
print(letters)
return
features = []
for l in letters:
features.append([int(x) for x in toFeature(l).split(' ')])
# l.save(TEMP_DIR + '%d.jpg' % len(features))
pre = clf.predict(features)
code = ''.join([chr(x + ord('A')) for x in pre])
print(code)
SAVE_TO = 'model.pkl'
def main():
global clf
clf = joblib.load(SAVE_TO)
test_onePic()
# test_tmp_dir()
if __name__ == '__main__':
main() | mit | 4,342,402,305,400,115,000 | 22.028169 | 70 | 0.578947 | false |
hoatle/odoo | addons/hw_proxy/__openerp__.py | 313 | 1675 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Hardware Proxy',
'version': '1.0',
'category': 'Point Of Sale',
'sequence': 6,
'summary': 'Connect the Web Client to Hardware Peripherals',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Hardware Proxy
==============
This module allows you to remotely use peripherals connected to this server.
This module only contains the enabling framework. The actual device drivers
are found in other modules that must be installed separately.
""",
'author': 'OpenERP SA',
'depends': [],
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,480,024,799,125,620,000 | 33.895833 | 78 | 0.619701 | false |