filename | text
---|---|
the-stack_0_10201 | """initial
Revision ID: ca4351944ed4
Revises:
Create Date: 2018-12-16 17:28:04.537922
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ca4351944ed4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('info_category',
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('update_time', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('info_user',
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('update_time', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('nick_name', sa.String(length=32), nullable=False),
sa.Column('password_hash', sa.String(length=128), nullable=False),
sa.Column('mobile', sa.String(length=11), nullable=False),
sa.Column('avatar_url', sa.String(length=256), nullable=True),
sa.Column('last_login', sa.DateTime(), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.Column('signature', sa.String(length=512), nullable=True),
sa.Column('gender', sa.Enum('MAN', 'WOMAN'), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('mobile'),
sa.UniqueConstraint('nick_name')
)
op.create_table('info_news',
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('update_time', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=256), nullable=False),
sa.Column('source', sa.String(length=64), nullable=False),
sa.Column('digest', sa.String(length=512), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('clicks', sa.Integer(), nullable=True),
sa.Column('comments_count', sa.Integer(), nullable=True),
sa.Column('index_image_url', sa.String(length=256), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('status', sa.Integer(), nullable=True),
sa.Column('reason', sa.String(length=256), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['info_category.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['info_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('info_user_fans',
sa.Column('follower_id', sa.Integer(), nullable=False),
sa.Column('followed_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['followed_id'], ['info_user.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['info_user.id'], ),
sa.PrimaryKeyConstraint('follower_id', 'followed_id')
)
op.create_table('info_comment',
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('update_time', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('news_id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('like_count', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['news_id'], ['info_news.id'], ),
sa.ForeignKeyConstraint(['parent_id'], ['info_comment.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['info_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('info_user_collection',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('news_id', sa.Integer(), nullable=False),
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['news_id'], ['info_news.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['info_user.id'], ),
sa.PrimaryKeyConstraint('user_id', 'news_id')
)
op.create_table('info_comment_like',
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('update_time', sa.DateTime(), nullable=True),
sa.Column('comment_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['comment_id'], ['info_comment.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['info_user.id'], ),
sa.PrimaryKeyConstraint('comment_id', 'user_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('info_comment_like')
op.drop_table('info_user_collection')
op.drop_table('info_comment')
op.drop_table('info_user_fans')
op.drop_table('info_news')
op.drop_table('info_user')
op.drop_table('info_category')
# ### end Alembic commands ###
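# Usage note: a revision like this is normally applied with the Alembic CLI
# rather than imported directly. Assuming a standard alembic.ini pointing at
# the target database, a typical workflow looks like:
#
#   alembic upgrade head    # runs upgrade() up to and including ca4351944ed4
#   alembic downgrade -1    # runs downgrade(), rolling back one revision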
|
the-stack_0_10202 | #!/usr/bin/python
# encoding: utf-8
from __future__ import unicode_literals
import os
import argparse
import plistlib
import sys
import sqlite3
from sqlite3 import Error
from workflow import Workflow3, ICON_INFO, ICON_WARNING, ICON_ERROR
KM_APP_SUPPORT = os.path.expanduser("~/Library/Application Support/Keyboard Maestro/")
KM_APP_RESOURCES = "/System/Volumes/Data/Applications/Keyboard Maestro.app/Contents/Resources/"
VARS_DB = KM_APP_SUPPORT + "Keyboard Maestro Variables.sqlite"
CLIPS_PLIST = KM_APP_SUPPORT + "Keyboard Maestro Clipboards.plist"
ICON_KM_VAR = KM_APP_RESOURCES + "Variable.icns"
ICON_KM_CLIP = KM_APP_RESOURCES + "ClipboardIcon.icns"
wf = None
log = None
# noinspection PyProtectedMember
def main(wf):
parser = argparse.ArgumentParser()
parser.add_argument('-v', dest='vars', action='store_true')
parser.add_argument('-c', dest='clips', action='store_true')
parser.add_argument('query', nargs='?', default=None)
args = parser.parse_args(wf.args)
if args.vars:
sql = "SELECT name, value FROM variables WHERE value IS NOT '%Delete%';"
# create a database connection
conn = create_connection(VARS_DB)
with conn:
log.info("query: " + sql)
cur = conn.cursor()
cur.execute(sql)
rows = cur.fetchall()
for row in rows:
name = row[0]
value = row[1]
if len(value) < 100:
sub = value
else:
sub = 'press ↩︎ to view in window'
it = wf.add_item(uid=value,
title=name,
subtitle=sub,
arg=[name,value],
autocomplete=name,
valid=True,
icon=ICON_KM_VAR,
icontype="filepath",
quicklookurl=value)
it.add_modifier('cmd', subtitle="delete '" + name + "'", arg=[name,value], valid=True)
elif args.clips:
clips_pl = plistlib.readPlist(CLIPS_PLIST)
for clip in clips_pl:
name = clip['Name']
uid = clip['UID']
it = wf.add_item(uid=uid,
title=name,
subtitle='press ↩︎ to view',
arg=[name, uid],
autocomplete=name,
valid=True,
icon=ICON_KM_CLIP,
icontype="filepath",
quicklookurl=ICON_KM_CLIP)
if len(wf._items) == 0:
wf.add_item('No items found', icon=ICON_WARNING)
wf.send_feedback()
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
except Error as e:
print(e)
return conn
if __name__ == '__main__':
wf = Workflow3()
log = wf.logger
sys.exit(wf.run(main))
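# Usage sketch: the flags below are the ones defined by the argument parser
# above. The script filename is a placeholder, and Workflow3 still expects
# Alfred's environment variables to be set, so treat this as an illustration
# of the CLI surface rather than a standalone command:
#
#   python km_search.py -v    # list Keyboard Maestro variables from the sqlite DB
#   python km_search.py -c    # list named clipboards from the plist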
|
the-stack_0_10203 | import unittest
from test import support
import gc
import weakref
import operator
import copy
import pickle
from random import randrange, shuffle
import sys
import warnings
import collections
import collections.abc
class PassThru(Exception):
pass
def check_pass_thru():
raise PassThru
yield 1
class BadCmp:
def __hash__(self):
return 1
def __eq__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class HashCountingInt(int):
'int-like object that counts the number of times __hash__ is called'
def __init__(self, *args):
self.hash_count = 0
def __hash__(self):
self.hash_count += 1
return int.__hash__(self)
class TestJointOps:
# Tests common to both set and frozenset
def setUp(self):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
self.assertRaises(TypeError, set().__init__, a=1)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
s = self.thetype([frozenset(self.letters)])
self.assertIn(self.thetype(self.letters), s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(u), self.basetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
# Issue #6573
x = self.thetype()
self.assertEqual(x.union(set([1]), x, set([2])), self.thetype([1, 2]))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except TypeError:
pass
else:
self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
z = s.intersection()
if self.thetype == frozenset():
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assertTrue(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except TypeError:
pass
else:
self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except TypeError:
pass
else:
self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except TypeError:
pass
else:
self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assertTrue(p < q)
self.assertTrue(p <= q)
self.assertTrue(q <= q)
self.assertTrue(q > p)
self.assertTrue(q >= p)
self.assertFalse(q < r)
self.assertFalse(q <= r)
self.assertFalse(q > r)
self.assertFalse(q >= r)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
if type(self.s) not in (set, frozenset):
self.s.x = 10
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s.x, dup.x)
def test_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
itorg = iter(self.s)
data = self.thetype(self.s)
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# Set iterators unpickle as list iterators due to the
# undefined order of set items.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(self.thetype(it), data)
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(self.thetype(it), data - self.thetype((drop,)))
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
pass
s = set(A() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = set([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, 'add'):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
if self.thetype == set:
self.assertEqual(repr(s), '{set(...)}')
else:
name = repr(s).partition('(')[0] # strip class name
self.assertEqual(repr(s), '%s({%s(...)})' % (name, name))
def test_cyclical_print(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
fo = open(support.TESTFN, "w")
try:
fo.write(str(s))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(s))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_do_not_rehash_dict_keys(self):
n = 10
d = dict.fromkeys(map(HashCountingInt, range(n)))
self.assertEqual(sum(elem.hash_count for elem in d), n)
s = self.thetype(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
s.difference(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
if hasattr(s, 'symmetric_difference_update'):
s.symmetric_difference_update(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
d2 = dict.fromkeys(set(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d), 123)
self.assertEqual(sum(elem.hash_count for elem in d), n)
self.assertEqual(d3, dict.fromkeys(d, 123))
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for set iterator object
class C(object):
pass
obj = C()
ref = weakref.ref(obj)
container = set([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, self.thetype)
class TestSet(TestJointOps, unittest.TestCase):
thetype = set
basetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
self.assertRaises(TypeError, s.__init__, s, 2);
self.assertRaises(TypeError, s.__init__, 1);
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_set_literal(self):
s = set([1,2,3])
t = {1,2,3}
self.assertEqual(s, t)
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
self.assertEqual(type(dup), self.basetype)
def test_add(self):
self.s.add('Q')
self.assertIn('Q', self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assertNotIn('a', self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.remove(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError as e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assertTrue(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assertNotIn('a', self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
self.assertRaises(ReferenceError, str, p)
def test_rich_compare(self):
class TestRichSetCompare:
def __gt__(self, some_set):
self.gt_called = True
return False
def __lt__(self, some_set):
self.lt_called = True
return False
def __ge__(self, some_set):
self.ge_called = True
return False
def __le__(self, some_set):
self.le_called = True
return False
# This first tries the builtin rich set comparison, which doesn't know
# how to handle the custom object. Upon returning NotImplemented, the
# corresponding comparison on the right object is invoked.
myset = {1, 2, 3}
myobj = TestRichSetCompare()
myset < myobj
self.assertTrue(myobj.gt_called)
myobj = TestRichSetCompare()
myset > myobj
self.assertTrue(myobj.lt_called)
myobj = TestRichSetCompare()
myset <= myobj
self.assertTrue(myobj.ge_called)
myobj = TestRichSetCompare()
myset >= myobj
self.assertTrue(myobj.le_called)
@unittest.skipUnless(hasattr(set, "test_c_api"),
'C API test only available in a debug build')
def test_c_api(self):
self.assertEqual(set().test_c_api(), True)
class SetSubclass(set):
pass
class TestSetSubclass(TestSet):
thetype = SetSubclass
basetype = set
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(TestSet):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
class TestFrozenSet(TestJointOps, unittest.TestCase):
thetype = frozenset
basetype = frozenset
def test_init(self):
s = self.thetype(self.word)
s.__init__(self.otherword)
self.assertEqual(s, set(self.word))
def test_singleton_empty_frozenset(self):
f = frozenset()
efs = [frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(range(0)), frozenset(frozenset()),
frozenset(f), f]
# All of the empty frozensets should have just one id()
self.assertEqual(len(set(map(id, efs))), 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertEqual(id(s), id(t))
def test_hash(self):
self.assertEqual(hash(self.thetype('abcdeb')),
hash(self.thetype('ebecda')))
# make sure that all permutations give the same hash value
n = 100
seq = [randrange(n) for i in range(n)]
results = set()
for i in range(200):
shuffle(seq)
results.add(hash(self.thetype(seq)))
self.assertEqual(len(results), 1)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(id(self.s), id(dup))
def test_frozen_as_dictkey(self):
seq = list(range(10)) + list('abcdefg') + ['apple']
key1 = self.thetype(seq)
key2 = self.thetype(reversed(seq))
self.assertEqual(key1, key2)
self.assertNotEqual(id(key1), id(key2))
d = {}
d[key1] = 42
self.assertEqual(d[key2], 42)
def test_hash_caching(self):
f = self.thetype('abcdcda')
self.assertEqual(hash(f), hash(f))
def test_hash_effectiveness(self):
n = 13
hashvalues = set()
addhashvalue = hashvalues.add
elemmasks = [(i+1, 1<<i) for i in range(n)]
for i in range(2**n):
addhashvalue(hash(frozenset([e for e, m in elemmasks if m&i])))
self.assertEqual(len(hashvalues), 2**n)
class FrozenSetSubclass(frozenset):
pass
class TestFrozenSetSubclass(TestFrozenSet):
thetype = FrozenSetSubclass
basetype = frozenset
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_copy(self):
dup = self.s.copy()
self.assertNotEqual(id(self.s), id(dup))
def test_nested_empty_constructor(self):
s = self.thetype()
t = self.thetype(s)
self.assertEqual(s, t)
def test_singleton_empty_frozenset(self):
Frozenset = self.thetype
f = frozenset()
F = Frozenset()
efs = [Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(range(0)), Frozenset(Frozenset()),
Frozenset(frozenset()), f, F, Frozenset(f), Frozenset(F)]
# All empty frozenset subclass instances should have different ids
self.assertEqual(len(set(map(id, efs))), len(efs))
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps:
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def check_repr_against_values(self):
text = repr(self.set)
self.assertTrue(text.startswith('{'))
self.assertTrue(text.endswith('}'))
result = text[1:-1].split(', ')
result.sort()
sorted_repr_values = [repr(value) for value in self.values]
sorted_repr_values.sort()
self.assertEqual(result, sorted_repr_values)
def test_print(self):
try:
fo = open(support.TESTFN, "w")
fo.write(str(self.set))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(self.set))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def test_empty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assertIn(v, self.values)
setiter = iter(self.set)
self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.set, proto)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set()"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{3}"
def test_in(self):
self.assertIn(3, self.set)
def test_not_in(self):
self.assertNotIn(2, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, "zero")]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{(0, 'zero')}"
def test_in(self):
self.assertIn((0, "zero"), self.set)
def test_not_in(self):
self.assertNotIn(9, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTriple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "triple set"
self.values = [0, "zero", operator.add]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = None
#------------------------------------------------------------------------------
class TestBasicOpsString(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "string set"
self.values = ["a", "b", "c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "bytes set"
self.values = [b"a", b"b", b"c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsMixedStringBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self._warning_filters = support.check_warnings()
self._warning_filters.__enter__()
warnings.simplefilter('ignore', BytesWarning)
self.case = "string and bytes set"
self.values = ["a", "b", b"a", b"b"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 4
def tearDown(self):
self._warning_filters.__exit__(None, None, None)
def test_repr(self):
self.check_repr_against_values()
#==============================================================================
def baditer():
raise TypeError
yield True
def gooditer():
yield True
class TestExceptionPropagation(unittest.TestCase):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1,2,3])
set((1,2,3))
set({'one':1, 'two':2, 'three':3})
set(range(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(unittest.TestCase):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
#==============================================================================
class TestUpdateOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(unittest.TestCase):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.assertIn(v, popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets:
case2method = {"<=": "issubset",
">=": "issuperset",
}
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets, unittest.TestCase):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets, unittest.TestCase):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets, unittest.TestCase):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps:
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_ge_gt_le_lt(self):
self.assertRaises(TypeError, lambda: self.set < self.other)
self.assertRaises(TypeError, lambda: self.set <= self.other)
self.assertRaises(TypeError, lambda: self.set > self.other)
self.assertRaises(TypeError, lambda: self.set >= self.other)
self.assertRaises(TypeError, lambda: self.other < self.set)
self.assertRaises(TypeError, lambda: self.other <= self.set)
self.assertRaises(TypeError, lambda: self.other > self.set)
self.assertRaises(TypeError, lambda: self.other >= self.set)
def test_update_operator(self):
try:
self.set |= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
if self.otherIsIterable:
self.set.union(self.other)
else:
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
try:
self.set &= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
if self.otherIsIterable:
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
if self.otherIsIterable:
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, self.set.symmetric_difference, self.other)
def test_difference_update_operator(self):
try:
self.set -= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
if self.otherIsIterable:
self.set.difference(self.other)
else:
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1:2, 3:4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsOperator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
def gen():
for i in range(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying:
def test_copy(self):
dup = self.set.copy()
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertTrue(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
##print type(dup), repr(dup)
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["zero", 0, None])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(unittest.TestCase):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assertTrue(a - b < a)
self.assertTrue(b - a < b)
self.assertTrue(a & b < a)
self.assertTrue(a & b < b)
self.assertTrue(a | b > a)
self.assertTrue(a | b > b)
self.assertTrue(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a&b, b&a)
self.assertEqual(a|b, b|a)
self.assertEqual(a^b, b^a)
if a != b:
self.assertNotEqual(a-b, b-a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a-b)|(a&b)|(b-a), a|b)
self.assertEqual((a&b)|(a^b), a|b)
self.assertEqual(a|(b-a), a|b)
self.assertEqual((a-b)|b, a|b)
self.assertEqual((a-b)|(a&b), a)
self.assertEqual((b-a)|(a&b), b)
self.assertEqual((a-b)|(b-a), a^b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a-b)&b, zero)
self.assertEqual((b-a)&a, zero)
self.assertEqual((a&b)&(a^b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for cons in (set, frozenset):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(sorted(cons(g(s)), key=repr), sorted(g(s), key=repr))
self.assertRaises(TypeError, cons , X(s))
self.assertRaises(TypeError, cons , N(s))
self.assertRaises(ZeroDivisionError, cons , E(s))
def test_inline_methods(self):
s = set('november')
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
actual = meth(g(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(sorted(actual, key=repr), sorted(expected, key=repr))
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for methname in ('update', 'intersection_update',
'difference_update', 'symmetric_difference_update'):
for g in (G, I, Ig, S, L, R):
s = set('january')
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s, key=repr), sorted(t, key=repr))
self.assertRaises(TypeError, getattr(set('january'), methname), X(data))
self.assertRaises(TypeError, getattr(set('january'), methname), N(data))
self.assertRaises(ZeroDivisionError, getattr(set('january'), methname), E(data))
class bad_eq:
def __eq__(self, other):
if be_bad:
set2.clear()
raise ZeroDivisionError
return self is other
def __hash__(self):
return 0
class bad_dict_clear:
def __eq__(self, other):
if be_bad:
dict2.clear()
return self is other
def __hash__(self):
return 0
class TestWeirdBugs(unittest.TestCase):
def test_8420_set_merge(self):
# This used to segfault
global be_bad, set2, dict2
be_bad = False
set1 = {bad_eq()}
set2 = {bad_eq() for i in range(75)}
be_bad = True
self.assertRaises(ZeroDivisionError, set1.update, set2)
be_bad = False
set1 = {bad_dict_clear()}
dict2 = {bad_dict_clear(): None}
be_bad = True
set1.symmetric_difference_update(dict2)
def test_iter_and_mutate(self):
# Issue #24581
s = set(range(100))
s.clear()
s.update(range(100))
si = iter(s)
s.clear()
a = list(range(100))
s.update(range(100))
list(si)
def test_merge_and_mutate(self):
class X:
def __hash__(self):
return hash(0)
def __eq__(self, o):
other.clear()
return False
other = set()
other = {X() for i in range(10)}
s = {0}
s.update(other)
# Application tests (based on David Eppstein's graph recipes) ===================================
def powerset(U):
"""Generates all subsets of a set or sequence U."""
U = iter(U)
try:
x = frozenset([next(U)])
for S in powerset(U):
yield S
yield S | x
except StopIteration:
yield frozenset()
def cube(n):
"""Graph of n-dimensional hypercube."""
singletons = [frozenset([x]) for x in range(n)]
return dict([(x, frozenset([x^s for s in singletons]))
for x in powerset(range(n))])
def linegraph(G):
"""Graph, the vertices of which are edges of G,
with two vertices being adjacent iff the corresponding
edges share a vertex."""
L = {}
for x in G:
for y in G[x]:
nx = [frozenset([x,z]) for z in G[x] if z != y]
ny = [frozenset([y,z]) for z in G[y] if z != x]
L[frozenset([x,y])] = frozenset(nx+ny)
return L
def faces(G):
'Return a set of faces in G. Where a face is a set of vertices on that face'
# currently limited to triangles,squares, and pentagons
f = set()
for v1, edges in G.items():
for v2 in edges:
for v3 in G[v2]:
if v1 == v3:
continue
if v1 in G[v3]:
f.add(frozenset([v1, v2, v3]))
else:
for v4 in G[v3]:
if v4 == v2:
continue
if v1 in G[v4]:
f.add(frozenset([v1, v2, v3, v4]))
else:
for v5 in G[v4]:
if v5 == v3 or v5 == v2:
continue
if v1 in G[v5]:
f.add(frozenset([v1, v2, v3, v4, v5]))
return f
class TestGraphs(unittest.TestCase):
def test_cube(self):
g = cube(3) # vert --> {v1, v2, v3}
vertices1 = set(g)
self.assertEqual(len(vertices1), 8) # eight vertices
for edge in g.values():
self.assertEqual(len(edge), 3) # each vertex connects to three edges
vertices2 = set(v for edges in g.values() for v in edges)
self.assertEqual(vertices1, vertices2) # edge vertices in original set
cubefaces = faces(g)
self.assertEqual(len(cubefaces), 6) # six faces
for face in cubefaces:
self.assertEqual(len(face), 4) # each face is a square
def test_cuboctahedron(self):
# http://en.wikipedia.org/wiki/Cuboctahedron
# 8 triangular faces and 6 square faces
# 12 identical vertices each connecting a triangle and square
g = cube(3)
cuboctahedron = linegraph(g) # V( --> {V1, V2, V3, V4}
self.assertEqual(len(cuboctahedron), 12)# twelve vertices
vertices = set(cuboctahedron)
for edges in cuboctahedron.values():
self.assertEqual(len(edges), 4) # each vertex connects to four other vertices
othervertices = set(edge for edges in cuboctahedron.values() for edge in edges)
self.assertEqual(vertices, othervertices) # edge vertices in original set
cubofaces = faces(cuboctahedron)
facesizes = collections.defaultdict(int)
for face in cubofaces:
facesizes[len(face)] += 1
self.assertEqual(facesizes[3], 8) # eight triangular faces
self.assertEqual(facesizes[4], 6) # six square faces
for vertex in cuboctahedron:
edge = vertex # Cuboctahedron vertices are edges in Cube
self.assertEqual(len(edge), 2) # Two cube vertices define an edge
for cubevert in edge:
self.assertIn(cubevert, g)
#==============================================================================
if __name__ == "__main__":
unittest.main()
|
the-stack_0_10204 | from dash_labs.templates.base import BaseTemplate
import dash_html_components as html
class HtmlCard(BaseTemplate):
"""
Simple template that places all components in a few html Div elements with a
card-like border.
"""
_valid_locations = ("bottom", "top")
_default_input_location = "bottom"
_default_output_location = "top"
def __init__(self, app, title=None, width=None):
super().__init__(app)
self.title = title
self.width = width
def _perform_layout(self):
# No callbacks here. Must be constant or idempotent
children = []
if self.title:
children.append(html.H2(self.title))
children.append(html.Div(self.get_containers("top")))
children.append(html.Hr())
children.append(html.Div(self.get_containers("bottom")))
layout = html.Div(
style={
"width": self.width,
"border": "1px solid lightgray",
"padding": 10,
"border-radius": "6px",
},
children=html.Div(children=children),
)
return layout
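# Minimal usage sketch (illustrative only, not part of the original module). It
# assumes a plain Dash app object and omits the dash_labs callback wiring that
# would populate the "top"/"bottom" containers, since that varies by version:
#
#   import dash
#   app = dash.Dash(__name__)
#   template = HtmlCard(app, title="My card", width="500px")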
|
the-stack_0_10208 | #!/usr/bin/env python3
import sys
import os
import argparse
def parseArguments():
    parser = argparse.ArgumentParser(description='transform file and header')
    parser.add_argument("--list_file", help="comma-separated list of input files", type=str, required=True)
    parser.add_argument('--use_rs', type=int, help="set to 1 to limit the output to the rs information", default=0)
    parser.add_argument("--out", help="output file prefix", type=str, required=True)
    args = parser.parse_args()
    return args
args=parseArguments()
splfile=args.list_file.split(',')
DicByRs = {}
listChrBp = {}
# rs IDs (and chromosomes) whose records disagree between input files
listrsissue = set()
listchrissue = set()
for File in splfile :
    print(File)
    Fread = open(File)
    FreadL = Fread.readline().split()
    Fread.close()
    Fread = open(File)
    if len(FreadL) == 3 :
        # 3-column input: rsID A1 A2
        for line in Fread :
            splt = line.replace('\n', '').split()
            if splt[0] not in DicByRs :
                DicByRs[splt[0]] = [None, None, splt[1], splt[2], None]
            else :
                RsInfo = DicByRs[splt[0]]
                # alleles must match (in either order) what was already recorded
                balisegood = (splt[1] == RsInfo[2] and splt[2] == RsInfo[3]) or (splt[1] == RsInfo[3] and splt[2] == RsInfo[2])
                if balisegood == False :
                    listrsissue.add(splt[0])
    elif len(FreadL) == 6 :
        # 6-column input: rsID Chro Pos A1 A2 newRs
        for line in Fread :
            splt = line.replace('\n', '').split()
            if splt[0] not in DicByRs :
                DicByRs[splt[0]] = [splt[1], splt[2], splt[3], splt[4], splt[5]]
            else :
                RsInfo = DicByRs[splt[0]]
                # alleles must match (in either order) what was already recorded
                balisegood = (splt[3] == RsInfo[2] and splt[4] == RsInfo[3]) or (splt[3] == RsInfo[3] and splt[4] == RsInfo[2])
                if balisegood == False :
                    listrsissue.add(splt[0])
                    listchrissue.add(splt[1])
                # check pos and chr
                if RsInfo[0] :
                    if RsInfo[0] != splt[1] and RsInfo[1] != splt[2] :
                        listrsissue.add(splt[0])
                else :
                    RsInfo[0] = splt[1]
                    RsInfo[1] = splt[2]
                    RsInfo[4] = splt[5]
    else :
        print("column number error: " + str(len(FreadL)))
        sys.exit(3)
    Fread.close()
writeRs = open(args.out, 'w')
writeRs2 = open(args.out + '_allinfo', 'w')
for rs in DicByRs :
    RsInfo = DicByRs[rs]
    if rs not in listrsissue :
        if args.use_rs == 1 :
            writeRs.write(rs + '\t' + str(RsInfo[3]) + '\t' + str(RsInfo[4]) + '\n')
        else :
            writeRs.write(rs + '\t' + '\t'.join([str(x) for x in RsInfo]) + '\n')
        writeRs2.write(rs + '\t' + '\t'.join([str(x) for x in RsInfo]) + '\n')
writeRs.close()
writeRs2.close()
# records flagged as inconsistent go to a separate file
writeRsError = open(args.out + '_issue', 'w')
for rs in listrsissue :
    RsInfo = DicByRs[rs]
    writeRsError.write(rs + '\t' + '\t'.join([str(x) for x in RsInfo]) + '\n')
writeRsError.close()
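# Illustrative invocation (the script name and file names are placeholders; each
# input file must have either 3 columns [rsID A1 A2] or 6 columns
# [rsID Chro Pos A1 A2 newRs]):
#   python transform_rs_lists.py --list_file fileA.tsv,fileB.tsv --out merged_rs --use_rs 0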
|
the-stack_0_10209 | #
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Maple OLT/ONU adapter.
"""
from uuid import uuid4
import arrow
import binascii
from scapy.layers.l2 import Ether, Dot1Q
from twisted.internet import reactor
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.spread import pb
from twisted.internet.defer import inlineCallbacks, returnValue, DeferredQueue
from zope.interface import implementer
from common.frameio.frameio import BpfProgramFilter, hexify
from voltha.adapters.interface import IAdapterInterface
from voltha.core.logical_device_agent import mac_str_to_tuple
import voltha.core.flow_decomposer as fd
from voltha.protos import third_party
from voltha.protos.adapter_pb2 import Adapter
from voltha.protos.adapter_pb2 import AdapterConfig
from voltha.protos.common_pb2 import LogLevel, OperStatus, ConnectStatus, \
AdminState
from voltha.protos.device_pb2 import DeviceType, DeviceTypes, Port, Device, \
PmConfigs, PmConfig, PmGroupConfig
from voltha.protos.health_pb2 import HealthStatus
from google.protobuf.empty_pb2 import Empty
from voltha.protos.events_pb2 import KpiEvent, MetricValuePairs
from voltha.protos.events_pb2 import KpiEventType
from voltha.protos.events_pb2 import AlarmEvent, AlarmEventType, \
AlarmEventSeverity, AlarmEventState, AlarmEventCategory
from voltha.protos.logical_device_pb2 import LogicalPort, LogicalDevice
from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
OFPPF_1GB_FD, OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, \
OFPC_FLOW_STATS, OFPP_CONTROLLER, OFPXMC_OPENFLOW_BASIC, \
ofp_switch_features, ofp_desc, ofp_port
from voltha.registry import registry
from voltha.extensions.omci.omci import *
_ = third_party
log = structlog.get_logger()
PACKET_IN_VLAN = 4091
is_inband_frame = BpfProgramFilter('(ether[14:2] & 0xfff) = 0x{:03x}'.format(
PACKET_IN_VLAN))
class MapleOltPmMetrics:
class Metrics:
def __init__(self, config, value=0, is_group=False):
self.config = config
self.value = value
self.is_group = is_group
def __init__(self,device):
self.pm_names = {'tx_64','tx_65_127', 'tx_128_255', 'tx_256_511',
'tx_512_1023', 'tx_1024_1518', 'tx_1519_9k', 'rx_64',
'rx_65_127', 'rx_128_255', 'rx_256_511', 'rx_512_1023',
'rx_1024_1518', 'rx_1519_9k', 'tx_pkts', 'rx_pkts',
'tx_bytes', 'rx_bytes'}
self.pm_group_names = {'nni'}
self.device = device
self.id = device.id
self.default_freq = 150
self.pon_metrics = dict()
self.nni_metrics = dict()
for m in self.pm_names:
self.pon_metrics[m] = \
self.Metrics(config = PmConfig(name=m,
type=PmConfig.COUNTER,
enabled=True), value = 0)
self.nni_metrics[m] = \
self.Metrics(config = PmConfig(name=m,
type=PmConfig.COUNTER,
enabled=True), value = 0)
self.pm_group_metrics = dict()
for m in self.pm_group_names:
self.pm_group_metrics[m] = \
self.Metrics(config = PmGroupConfig(group_name=m,
group_freq=self.default_freq,
enabled=True),
is_group = True)
for m in sorted(self.nni_metrics):
pm=self.nni_metrics[m]
self.pm_group_metrics['nni'].config.metrics.extend([PmConfig(
name=pm.config.name,
type=pm.config.type,
enabled=pm.config.enabled)])
@inlineCallbacks
def configure_pm_collection_freq(self, freq, remote):
log.info('configuring-pm-collection-freq',
freq=freq)
try:
data = yield remote.callRemote('set_stats_collection_interval', 0,
freq)
log.info('configured-pm-collection-freq', data=data)
except Exception as e:
log.exception('configure-pm-collection-freq', exc=str(e))
def enable_pm_collection(self, pm_group, remote):
if pm_group == 'nni':
self.configure_pm_collection_freq(self.default_freq/10, remote)
def disable_pm_collection(self, pm_group, remote):
if pm_group == 'nni':
self.configure_pm_collection_freq(0, remote)
def update(self, device, pm_config, remote):
if self.default_freq != pm_config.default_freq:
self.default_freq = pm_config.default_freq
if pm_config.grouped is True:
for m in pm_config.groups:
self.pm_group_metrics[m.group_name].config.enabled = m.enabled
if m.enabled is True:
self.enable_pm_collection(m.group_name, remote)
else:
self.disable_pm_collection(m.group_name, remote)
else:
for m in pm_config.metrics:
self.pon_metrics[m.name].config.enabled = m.enabled
self.nni_metrics[m.name].config.enabled = m.enabled
def make_proto(self):
pm_config = PmConfigs(
id=self.id,
default_freq=self.default_freq,
grouped = True,
freq_override = False)
for m in self.pm_group_names:
pm_config.groups.extend([self.pm_group_metrics[m].config])
return pm_config
class MapleOltRxHandler(pb.Root):
def __init__(self, device_id, adapter, onu_queue):
self.device_id = device_id
self.adapter = adapter
self.onu_discovered_queue = onu_queue
self.adapter_agent = adapter.adapter_agent
self.adapter_name = adapter.name
# registry('main').get_args().external_host_address
self.pb_server_ip = '192.168.24.20'
self.pb_server_port = 24497
self.pb_server_factory = pb.PBServerFactory(self)
# start PB server
self.listen_port = reactor.listenTCP(self.pb_server_port,
self.pb_server_factory)
self.omci_rx_queue = DeferredQueue()
log.info('PB-server-started-on-port', port=self.pb_server_port)
def get_ip(self):
return self.pb_server_ip
def get_port(self):
return self.pb_server_port
def get_host(self):
return self.listen_port.getHost()
def remote_echo(self, pkt_type, pon, onu, port, crc_ok, msg_size, msg_data):
log.info('received-omci-msg',
pkt_type=pkt_type,
pon_id=pon,
onu_id=onu,
port_id=port,
crc_ok=crc_ok,
msg_size=msg_size,
msg_data=hexify(msg_data))
self.omci_rx_queue.put((onu, msg_data))
def receive_omci_msg(self):
return self.omci_rx_queue.get()
def remote_report_stats(self, _object, key, stats_data):
log.info('received-stats-msg',
object=_object,
key=key,
stats=stats_data)
prefix = 'voltha.{}.{}'.format(self.adapter_name, self.device_id)
try:
ts = arrow.utcnow().timestamp
prefixes = {
prefix + '.nni': MetricValuePairs(metrics=stats_data)
}
kpi_event = KpiEvent(
type=KpiEventType.slice,
ts=ts,
prefixes=prefixes
)
self.adapter_agent.submit_kpis(kpi_event)
except Exception as e:
log.exception('failed-to-submit-kpis', e=e)
def remote_report_event(self, _object, key, event, event_data=None):
def _convert_serial_data(data):
b = bytearray()
b.extend(data)
return binascii.hexlify(b)
log.info('received-event-msg',
object=_object,
key=key,
event_str=event,
event_data=event_data)
if _object == 'device':
# key: {'device_id': <int>}
# event: 'state-changed'
# event_data: {'state_change_successful': <False|True>,
# 'new_state': <str> ('active-working'|'inactive')}
pass
elif _object == 'nni':
# key: {'device_id': <int>, 'nni': <int>}
pass
elif _object == 'pon_ni':
# key: {'device_id': <int>, 'pon_ni': <int>}
# event: 'state-changed'
# event_data: {'state_change_successful': <False|True>,
# 'new_state': <str> ('active-working'|'inactive')}
#
# event: 'onu-discovered'
# event_data: {'serial_num_vendor_id': <str>
# 'serial_num_vendor_specific': <str>
# 'ranging_time': <int>
# 'onu_id': <int>
# 'us_line_rate': <int> (0=2.5G, 1=10G)
# 'ds_pon_id': <int>
# 'us_pon_id': <int>
# 'tuning_granularity': <int>
# 'step_tuning_time': <int>
# 'attenuation': <int>
# 'power_levelling_caps': <int>}
if 'onu-discovered' == event and event_data is not None:
event_data['_device_id'] = key['device_id'] if 'device_id' in key else None
event_data['_pon_id'] = key['pon_id'] if 'pon_id' in key else None
event_data['_vendor_id'] = _convert_serial_data(event_data['serial_num_vendor_id']) \
if 'serial_num_vendor_id' in event_data else None
event_data['_vendor_specific'] = _convert_serial_data(event_data['serial_num_vendor_specific']) \
if 'serial_num_vendor_specific' in event_data else None
self.onu_discovered_queue.put(event_data)
log.info('onu-discovered-event-added-to-queue', event_data=event_data)
elif _object == 'onu':
# key: {'device_id': <int>, 'pon_ni': <int>, 'onu_id': <int>}
# event: 'activation-completed'
# event_data: {'activation_successful': <False|True>,
# act_fail_reason': <str>}
#
# event: 'deactivation-completed'
# event_data: {'deactivation_successful': <False|True>}
#
# event: 'ranging-completed'
# event_data: {'ranging_successful': <False|True>,
# 'ranging_fail_reason': <str>,
# 'eqd': <int>,
# 'number_of_ploams': <int>,
# 'power_level': <int>}
#
# event: 'enable-completed'
# event_data: {'serial_num-vendor_id': <str>
# 'serial_num-vendor_specific: <str>}
#
# event: 'disable-completed'
# event_data: {'serial_num-vendor_id': <str>
# 'serial_num-vendor_specific: <str>}
# Get child_device from onu_id
child_device = self.adapter_agent.get_child_device(self.device_id, onu_id=key['onu_id'])
assert child_device is not None
# Build the message, the ONU adapter uses the proxy_address
# to uniquely identify a specific ONU
msg = {'proxy_address':child_device.proxy_address, 'event':event, 'event_data':event_data}
# Send the event message to the ONU adapter
self.adapter_agent.publish_inter_adapter_message(child_device.id, msg)
elif _object == 'alloc_id':
# key: {'device_id': <int>, 'pon_ni': <int>, 'onu_id': <int>, 'alloc_id': ,<int>}
pass
elif _object == 'gem_port':
# key: {'device_id': <int>, 'pon_ni': <int>, 'onu_id': <int>, 'gem_port': ,<int>}
pass
elif _object == 'trx':
# key: {'device_id': <int>, 'pon_ni': <int>}
pass
elif _object == 'flow_map':
# key: {'device_id': <int>, 'pon_ni': <int>}
pass
def remote_report_alarm(self, _object, key, alarm, status, priority,
alarm_data=None):
log.info('received-alarm-msg',
object=_object,
key=key,
alarm=alarm,
status=status,
priority=priority,
alarm_data=alarm_data)
id = 'voltha.{}.{}.{}'.format(self.adapter_name, self.device_id, _object)
description = '{} Alarm - {} - {}'.format(_object.upper(), alarm.upper(),
'Raised' if status else 'Cleared')
if priority == 'low':
severity = AlarmEventSeverity.MINOR
elif priority == 'medium':
severity = AlarmEventSeverity.MAJOR
elif priority == 'high':
severity = AlarmEventSeverity.CRITICAL
else:
severity = AlarmEventSeverity.INDETERMINATE
try:
ts = arrow.utcnow().timestamp
alarm_event = self.adapter_agent.create_alarm(
id=id,
resource_id=str(key),
type=AlarmEventType.EQUIPMENT,
category=AlarmEventCategory.PON,
severity=severity,
state=AlarmEventState.RAISED if status else AlarmEventState.CLEARED,
description=description,
context=alarm_data,
raised_ts = ts)
self.adapter_agent.submit_alarm(self.device_id, alarm_event)
except Exception as e:
log.exception('failed-to-submit-alarm', e=e)
# take action based on alarm type, only pon_ni and onu objects report alarms
        if _object == 'pon_ni':
# key: {'device_id': <int>, 'pon_ni': <int>}
# alarm: 'los'
# status: <False|True>
pass
        elif _object == 'onu':
# key: {'device_id': <int>, 'pon_ni': <int>, 'onu_id': <int>}
# alarm: <'los'|'lob'|'lopc_miss'|'los_mic_err'|'dow'|'sf'|'sd'|'suf'|'df'|'tiw'|'looc'|'dg'>
# status: <False|True>
pass
@implementer(IAdapterInterface)
class MapleOltAdapter(object):
name = 'maple_olt'
supported_device_types = [
DeviceType(
id=name,
adapter=name,
accepts_bulk_flow_update=True
)
]
def __init__(self, adapter_agent, config):
self.adapter_agent = adapter_agent
self.config = config
self.descriptor = Adapter(
id=self.name,
vendor='Voltha project',
version='0.4',
config=AdapterConfig(log_level=LogLevel.INFO)
)
self.devices_handlers = dict() # device_id -> MapleOltHandler()
self.logical_device_id_to_root_device_id = dict()
# register for adapter messages
self.adapter_agent.register_for_inter_adapter_messages()
def start(self):
log.debug('starting')
log.info('started')
def stop(self):
log.debug('stopping')
log.info('stopped')
def adapter_descriptor(self):
return self.descriptor
def device_types(self):
return DeviceTypes(items=self.supported_device_types)
def health(self):
return HealthStatus(state=HealthStatus.HealthState.HEALTHY)
def change_master_state(self, master):
raise NotImplementedError()
def update_pm_config(self, device, pm_config):
log.info("adapter-update-pm-config", device=device, pm_config=pm_config)
handler = self.devices_handlers[device.id]
handler.update_pm_metrics(device, pm_config)
def adopt_device(self, device):
log.info("adopt-device", device=device)
self.devices_handlers[device.id] = MapleOltHandler(self, device.id)
reactor.callLater(0, self.devices_handlers[device.id].activate, device)
return device
def reconcile_device(self, device):
raise NotImplementedError()
def abandon_device(self, device):
raise NotImplementedError()
def disable_device(self, device):
raise NotImplementedError()
def reenable_device(self, device):
raise NotImplementedError()
def reboot_device(self, device):
raise NotImplementedError()
def download_image(self, device, request):
raise NotImplementedError()
def get_image_download_status(self, device, request):
raise NotImplementedError()
def cancel_image_download(self, device, request):
raise NotImplementedError()
def activate_image_update(self, device, request):
raise NotImplementedError()
def revert_image_update(self, device, request):
raise NotImplementedError()
def self_test_device(self, device):
"""
        This is called to self-test a device based on an NBI call.
:param device: A Voltha.Device object.
:return: Will return result of self test
"""
log.info('self-test-device', device=device.id)
raise NotImplementedError()
def delete_device(self, device):
raise NotImplementedError()
def get_device_details(self, device):
raise NotImplementedError()
def update_flows_bulk(self, device, flows, groups):
log.info('bulk-flow-update', device_id=device.id,
flows=flows, groups=groups)
assert len(groups.items) == 0, "Cannot yet deal with groups"
handler = self.devices_handlers[device.id]
return handler.update_flow_table(flows.items, device)
def update_flows_incrementally(self, device, flow_changes, group_changes):
raise NotImplementedError()
def send_proxied_message(self, proxy_address, msg):
log.info('send-proxied-message', proxy_address=proxy_address, msg=msg)
handler = self.devices_handlers[proxy_address.device_id]
handler.send_proxied_message(proxy_address, msg)
def receive_proxied_message(self, proxy_address, msg):
raise NotImplementedError()
def receive_packet_out(self, logical_device_id, egress_port_no, msg):
def ldi_to_di(ldi):
di = self.logical_device_id_to_root_device_id.get(ldi)
if di is None:
logical_device = self.adapter_agent.get_logical_device(ldi)
di = logical_device.root_device_id
self.logical_device_id_to_root_device_id[ldi] = di
return di
device_id = ldi_to_di(logical_device_id)
handler = self.devices_handlers[device_id]
handler.packet_out(egress_port_no, msg)
def receive_inter_adapter_message(self, msg):
pass
def create_interface(self, device, data):
raise NotImplementedError()
def update_interface(self, device, data):
raise NotImplementedError()
def remove_interface(self, device, data):
raise NotImplementedError()
def receive_onu_detect_state(self, device_id, state):
raise NotImplementedError()
def create_tcont(self, device, tcont_data, traffic_descriptor_data):
raise NotImplementedError()
def update_tcont(self, device, tcont_data, traffic_descriptor_data):
raise NotImplementedError()
def remove_tcont(self, device, tcont_data, traffic_descriptor_data):
raise NotImplementedError()
def create_gemport(self, device, data):
raise NotImplementedError()
def update_gemport(self, device, data):
raise NotImplementedError()
def remove_gemport(self, device, data):
raise NotImplementedError()
def create_multicast_gemport(self, device, data):
raise NotImplementedError()
def update_multicast_gemport(self, device, data):
raise NotImplementedError()
def remove_multicast_gemport(self, device, data):
raise NotImplementedError()
def create_multicast_distribution_set(self, device, data):
raise NotImplementedError()
def update_multicast_distribution_set(self, device, data):
raise NotImplementedError()
def remove_multicast_distribution_set(self, device, data):
raise NotImplementedError()
def suppress_alarm(self, filter):
raise NotImplementedError()
def unsuppress_alarm(self, filter):
raise NotImplementedError()
class MaplePBClientFactory(pb.PBClientFactory, ReconnectingClientFactory):
channel = None
maxDelay = 60
initialDelay = 15
def clientConnectionMade(self, broker):
log.info('pb-client-connection-made')
pb.PBClientFactory.clientConnectionMade(self, broker)
ReconnectingClientFactory.resetDelay(self)
def clientConnectionLost(self, connector, reason, reconnecting=0):
log.info('pb-client-connection-lost')
pb.PBClientFactory.clientConnectionLost(self, connector, reason,
reconnecting=1)
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
log.info('pb-client-connection-lost-retrying')
def clientConnectionFailed(self, connector, reason):
log.info('pb-client-connection-failed')
pb.PBClientFactory.clientConnectionFailed(self, connector, reason)
ReconnectingClientFactory.clientConnectionFailed(self, connector,
reason)
log.info('pb-client-connection-failed-retrying')
def disconnect(self, stopTrying=0):
if stopTrying:
ReconnectingClientFactory.stopTrying(self)
pb.PBClientFactory.disconnect(self)
def channel_disconnected(self, channel):
log.info('pb-channel-disconnected', channel=channel)
self.disconnect()
@inlineCallbacks
def getChannel(self):
if self.channel is None:
try:
self.channel = yield self.getRootObject()
self.channel.notifyOnDisconnect(self.channel_disconnected)
except Exception as e:
log.info('pb-client-failed-to-get-channel', exc=str(e))
self.channel = None
returnValue(self.channel)
class MapleOltHandler(object):
def __init__(self, adapter, device_id):
self.adapter = adapter
self.adapter_agent = adapter.adapter_agent
self.device_id = device_id
self.log = structlog.get_logger(device_id=device_id)
self.io_port = None
self.logical_device_id = None
self.interface = registry('main').get_args().interface
self.pbc_factory = MaplePBClientFactory()
self.pbc_port = 24498
self.tx_id = 0
self.onu_discovered_queue = DeferredQueue()
self.rx_handler = MapleOltRxHandler(self.device_id, self.adapter, self.onu_discovered_queue)
self.heartbeat_count = 0
self.heartbeat_miss = 0
self.heartbeat_interval = 1
self.heartbeat_failed_limit = 3
self.command_timeout = 5
self.pm_metrics = None
self.onus = {}
def __del__(self):
if self.io_port is not None:
registry('frameio').close_port(self.io_port)
def get_channel(self):
return self.pbc_factory.getChannel()
def get_proxy_channel_id_from_onu(self, onu_id):
return onu_id << 4
def get_onu_from_channel_id(self, channel_id):
return channel_id >> 4
def get_tunnel_tag_from_onu(self, onu):
return 1024 + (onu * 16)
def get_onu_from_tunnel_tag(self, tunnel_tag):
return (tunnel_tag - 1024) / 16
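    # Worked example of the mappings above (comment added for clarity): onu_id 0
    # maps to tunnel tag 1024, onu_id 1 to 1040, onu_id 2 to 1056, and the proxy
    # channel id is onu_id << 4, so onu_id 1 uses channel 16.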
def get_new_onu_id(self, vendor, vendor_specific):
onu_id = None
for i in range(0, 63):
if i not in self.onus:
onu_id = i
break
if onu_id is not None:
self.onus[onu_id] = {'onu_id': onu_id,
'vendor': vendor,
'vendor_specific': vendor_specific}
return onu_id
def onu_exists(self, onu_id):
if onu_id in self.onus:
self.log.info('onu-exists',
onu_id=onu_id,
vendor=self.onus[onu_id]['vendor'],
vendor_specific=self.onus[onu_id]['vendor_specific'])
return self.onus[onu_id]['vendor'], self.onus[onu_id]['vendor_specific']
else:
self.log.info('onu-does-not-exist', onu_id=onu_id)
return None, None
def onu_serial_exists(self, sn_vendor, sn_vendor_specific):
for key, value in self.onus.iteritems():
if sn_vendor in value.itervalues() and sn_vendor_specific in value.itervalues():
self.log.info('onu-serial-number-exists',
onu_id=value['onu_id'],
vendor=sn_vendor,
vendor_specific=sn_vendor_specific,
onus=self.onus)
return value['onu_id']
self.log.info('onu-serial-number-does-not-exist',
vendor=sn_vendor,
vendor_specific=sn_vendor_specific,
onus=self.onus)
return None
@inlineCallbacks
def send_set_remote(self):
srv_ip = self.rx_handler.get_ip()
srv_port = self.rx_handler.get_port()
self.log.info('setting-remote-ip-port', ip=srv_ip, port=srv_port)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('set_remote', srv_ip, srv_port)
self.log.info('set-remote', data=data, ip=srv_ip, port=srv_port)
except Exception as e:
self.log.info('set-remote-exception', exc=str(e))
@inlineCallbacks
def send_config_classifier(self, olt_no, etype, ip_proto=None,
dst_port=None):
self.log.info('configuring-classifier',
olt=olt_no,
etype=etype,
ip_proto=ip_proto,
dst_port=dst_port)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('config_classifier',
olt_no,
etype,
ip_proto,
dst_port)
self.log.info('configured-classifier', data=data)
except Exception as e:
self.log.info('config-classifier-exception', exc=str(e))
@inlineCallbacks
def send_config_acflow(self, olt_no, onu_no, etype, ip_proto=None,
dst_port=None):
self.log.info('configuring-acflow',
olt=olt_no,
onu=onu_no,
etype=etype,
ip_proto=ip_proto,
dst_port=dst_port)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('config_acflow',
olt_no,
onu_no,
etype,
ip_proto,
dst_port)
self.log.info('configured-acflow', data=data)
except Exception as e:
self.log.info('config-acflow-exception', exc=str(e))
@inlineCallbacks
def send_connect_olt(self, olt_no):
self.log.info('connecting-to-olt', olt=olt_no)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('connect_olt', olt_no)
self.log.info('connected-to-olt', data=data)
except Exception as e:
self.log.info('connect-olt-exception', exc=str(e))
@inlineCallbacks
def send_activate_olt(self, olt_no):
self.log.info('activating-olt', olt=olt_no)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('activate_olt', olt_no)
self.log.info('activated-olt', data=data)
except Exception as e:
self.log.info('activate-olt-exception', exc=str(e))
@inlineCallbacks
def send_create_onu(self, olt_no, onu_no, serial_no, vendor_no):
self.log.info('creating-onu',
olt=olt_no,
onu=onu_no,
serial=serial_no,
vendor=vendor_no)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('create_onu',
olt_no,
onu_no,
serial_no,
vendor_no)
self.log.info('created-onu', data=data)
except Exception as e:
self.log.info('create-onu-exception', exc=str(e))
@inlineCallbacks
def send_configure_alloc_id(self, olt_no, onu_no, alloc_id):
self.log.info('configuring-alloc-id',
olt=olt_no,
onu=onu_no,
alloc_id=alloc_id)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('configure_alloc_id',
olt_no,
onu_no,
alloc_id)
self.log.info('configured-alloc-id', data=data)
except Exception as e:
self.log.info('configure-alloc-id-exception', exc=str(e))
@inlineCallbacks
def send_configure_unicast_gem(self, olt_no, onu_no, uni_gem):
self.log.info('configuring-unicast-gem',
olt=olt_no,
onu=onu_no,
unicast_gem_port=uni_gem)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('configure_unicast_gem',
olt_no,
onu_no,
uni_gem)
self.log.info('configured-unicast-gem', data=data)
except Exception as e:
self.log.info('configure-unicast-gem-exception', exc=str(e))
@inlineCallbacks
def send_configure_multicast_gem(self, olt_no, onu_no, multi_gem):
self.log.info('configuring-multicast-gem',
olt=olt_no,
onu=onu_no,
multicast_gem_port=multi_gem)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('configure_multicast_gem',
olt_no,
onu_no,
multi_gem)
self.log.info('configured-multicast-gem', data=data)
except Exception as e:
self.log.info('configure-multicast-gem-exception', exc=str(e))
@inlineCallbacks
def send_configure_onu(self, olt_no, onu_no, alloc_id, uni_gem, multi_gem):
self.log.info('configuring-onu',
olt=olt_no,
onu=onu_no,
alloc_id=alloc_id,
unicast_gem_port=uni_gem,
multicast_gem_port=multi_gem)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('configure_onu',
olt_no,
onu_no,
alloc_id,
uni_gem,
multi_gem)
self.log.info('configured-onu', data=data)
except Exception as e:
self.log.info('configure-onu-exception', exc=str(e))
@inlineCallbacks
def send_activate_onu(self, olt_no, onu_no):
self.log.info('activating-onu', olt=olt_no, onu=onu_no)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('activate_onu', olt_no, onu_no)
self.log.info('activated-onu', data=data)
except Exception as e:
self.log.info('activate-onu-exception', exc=str(e))
@inlineCallbacks
def heartbeat(self, device_id, state='run'):
"""Heartbeat OLT hardware
Call PB remote method 'heartbeat' to verify connectivity to OLT
        hardware. If the heartbeat is missed self.heartbeat_failed_limit times,
        the OLT adapter is set to FAILED/UNREACHABLE.
        No further action from the VOLTHA core is expected as a result of a
        heartbeat failure. The heartbeat continues after a failure and, once
        connectivity is restored, the adapter state is set back to ACTIVE/REACHABLE.
Arguments:
device_id: adapter device id
state: desired state (stop, start, run)
"""
self.log.debug('olt-heartbeat', device=device_id, state=state,
count=self.heartbeat_count)
def add_timeout(d, duration):
return reactor.callLater(duration, d.cancel)
def cancel_timeout(t):
if t.active():
t.cancel()
self.log.debug('olt-heartbeat-timeout-cancelled')
def heartbeat_alarm(device_id, status, heartbeat_misses=0):
try:
ts = arrow.utcnow().timestamp
alarm_data = {'heartbeats_missed':str(heartbeat_misses)}
alarm_event = self.adapter_agent.create_alarm(
id='voltha.{}.{}.olt'.format(self.adapter.name, device_id),
resource_id='olt',
type=AlarmEventType.EQUIPMENT,
category=AlarmEventCategory.PON,
severity=AlarmEventSeverity.CRITICAL,
state=AlarmEventState.RAISED if status else
AlarmEventState.CLEARED,
description='OLT Alarm - Heartbeat - {}'.format('Raised'
if status
else 'Cleared'),
context=alarm_data,
raised_ts = ts)
self.adapter_agent.submit_alarm(device_id, alarm_event)
except Exception as e:
log.exception('failed-to-submit-alarm', e=e)
if state == 'stop':
return
if state == 'start':
self.heartbeat_count = 0
self.heartbeat_miss = 0
try:
d = self.get_channel()
timeout = add_timeout(d, self.command_timeout)
remote = yield d
cancel_timeout(timeout)
d = remote.callRemote('heartbeat', self.heartbeat_count)
timeout = add_timeout(d, self.command_timeout)
data = yield d
cancel_timeout(timeout)
except Exception as e:
data = -1
self.log.info('olt-heartbeat-exception', data=data,
count=self.heartbeat_miss, exc=str(e))
if data != self.heartbeat_count:
# something is not right
self.heartbeat_miss += 1
self.log.info('olt-heartbeat-miss', data=data,
count=self.heartbeat_count, miss=self.heartbeat_miss)
else:
if self.heartbeat_miss > 0:
self.heartbeat_miss = 0
_device = self.adapter_agent.get_device(device_id)
_device.connect_status = ConnectStatus.REACHABLE
_device.oper_status = OperStatus.ACTIVE
_device.reason = ''
self.adapter_agent.update_device(_device)
heartbeat_alarm(device_id, 0)
_device = self.adapter_agent.get_device(device_id)
if (self.heartbeat_miss >= self.heartbeat_failed_limit) and \
(_device.connect_status == ConnectStatus.REACHABLE):
self.log.info('olt-heartbeat-failed', data=data,
count=self.heartbeat_miss)
_device = self.adapter_agent.get_device(device_id)
_device.connect_status = ConnectStatus.UNREACHABLE
_device.oper_status = OperStatus.FAILED
_device.reason = 'Lost connectivity to OLT'
self.adapter_agent.update_device(_device)
heartbeat_alarm(device_id, 1, self.heartbeat_miss)
self.heartbeat_count += 1
reactor.callLater(self.heartbeat_interval, self.heartbeat, device_id)
@inlineCallbacks
def arrive_onu(self):
self.log.info('arrive-onu waiting')
_data = yield self.onu_discovered_queue.get()
ok_to_arrive = False
olt_id = _data['_device_id']
pon_id = _data['_pon_id']
onu_id = self.onu_serial_exists(_data['_vendor_id'], _data['_vendor_specific'])
self.log.info('arrive-onu-detected', olt_id=olt_id, pon_ni=pon_id, onu_data=_data, onus=self.onus)
if _data['onu_id'] == 65535:
if onu_id is not None:
self.log.info('onu-activation-already-in-progress',
vendor=_data['_vendor_id'],
vendor_specific=_data['_vendor_specific'],
onus=self.onus)
else:
onu_id = self.get_new_onu_id(_data['_vendor_id'],
_data['_vendor_specific'])
self.log.info('assigned-onu-id',
onu_id=onu_id,
vendor=_data['_vendor_id'],
vendor_specific=_data['_vendor_specific'],
onus=self.onus)
ok_to_arrive = True
else:
vendor_id, vendor_specific = self.onu_exists(_data['onu_id'])
if vendor_id is not None and vendor_id == _data['_vendor_id'] and \
vendor_specific is not None and vendor_specific == _data['_vendor_specific']:
onu_id = _data['onu_id']
self.log.info('re-discovered-existing-onu',
onu_id=onu_id,
vendor=_data['_vendor_id'],
vendor_specific=_data['_vendor_specific'])
ok_to_arrive = True
else:
self.log.info('onu-id-serial-number-mismatch-detected',
onu_id=onu_id,
vendor_id=vendor_id,
new_vendor_id=_data['_vendor_id'],
vendor_specific=vendor_specific,
new_vendor_specific=_data['_vendor_specific'])
if onu_id is not None and ok_to_arrive:
self.log.info('arriving-onu', onu_id=onu_id)
tunnel_tag = self.get_tunnel_tag_from_onu(onu_id)
yield self.send_create_onu(pon_id,
onu_id,
_data['_vendor_id'],
_data['_vendor_specific'])
yield self.send_configure_alloc_id(pon_id, onu_id, tunnel_tag)
yield self.send_configure_unicast_gem(pon_id, onu_id, tunnel_tag)
yield self.send_configure_multicast_gem(pon_id, onu_id, 4000)
yield self.send_activate_onu(pon_id, onu_id)
self.adapter_agent.child_device_detected(
parent_device_id=self.device_id,
parent_port_no=100,
child_device_type='broadcom_onu',
proxy_address=Device.ProxyAddress(
device_id=self.device_id,
channel_id=self.get_proxy_channel_id_from_onu(onu_id), # c-vid
onu_id=onu_id,
onu_session_id=tunnel_tag # tunnel_tag/gem_port, alloc_id
),
admin_state=AdminState.ENABLED,
vlan=tunnel_tag,
serial_number=_data['_vendor_specific']
)
reactor.callLater(1, self.arrive_onu)
@inlineCallbacks
def activate(self, device):
self.log.info('activating-olt', device=device)
while self.onu_discovered_queue.pending:
_ = yield self.onu_discovered_queue.get()
if self.logical_device_id is None:
if not device.ipv4_address:
device.oper_status = OperStatus.FAILED
device.reason = 'No ipv4_address field provided'
self.adapter_agent.update_device(device)
return
device.root = True
device.vendor = 'Broadcom'
device.model = 'bcm68620'
device.serial_number = device.ipv4_address
self.adapter_agent.update_device(device)
nni_port = Port(
port_no=1,
label='NNI facing Ethernet port',
type=Port.ETHERNET_NNI,
admin_state=AdminState.ENABLED,
oper_status=OperStatus.ACTIVE
)
self.adapter_agent.add_port(device.id, nni_port)
self.adapter_agent.add_port(device.id, Port(
port_no=100,
label='PON port',
type=Port.PON_OLT,
admin_state=AdminState.ENABLED,
oper_status=OperStatus.ACTIVE
))
ld = LogicalDevice(
            # not setting id and datapath_id will let the adapter
# agent pick id
desc=ofp_desc(
mfr_desc='cord project',
hw_desc='n/a',
sw_desc='logical device for Maple-based PON',
serial_num=uuid4().hex,
dp_desc='n/a'
),
switch_features=ofp_switch_features(
n_buffers=256, # TODO fake for now
n_tables=2, # TODO ditto
capabilities=( # TODO and ditto
OFPC_FLOW_STATS
| OFPC_TABLE_STATS
| OFPC_PORT_STATS
| OFPC_GROUP_STATS
)
),
root_device_id=device.id
)
ld_initialized = self.adapter_agent.create_logical_device(ld)
cap = OFPPF_1GB_FD | OFPPF_FIBER
self.adapter_agent.add_logical_port(ld_initialized.id, LogicalPort(
id='nni',
ofp_port=ofp_port(
port_no=0, # is 0 OK?
hw_addr=mac_str_to_tuple('00:00:00:00:00:%02x' % 129),
name='nni',
config=0,
state=OFPPS_LIVE,
curr=cap,
advertised=cap,
peer=cap,
curr_speed=OFPPF_1GB_FD,
max_speed=OFPPF_1GB_FD
),
device_id=device.id,
device_port_no=nni_port.port_no,
root_port=True
))
device = self.adapter_agent.get_device(device.id)
device.parent_id = ld_initialized.id
device.connect_status = ConnectStatus.UNREACHABLE
device.oper_status = OperStatus.ACTIVATING
self.adapter_agent.update_device(device)
self.logical_device_id = ld_initialized.id
device = self.adapter_agent.get_device(device.id)
self.log.info('initiating-connection-to-olt',
device_id=device.id,
ipv4=device.ipv4_address,
port=self.pbc_port)
try:
reactor.connectTCP(device.ipv4_address, self.pbc_port, self.pbc_factory)
device.connect_status = ConnectStatus.REACHABLE
device.oper_status = OperStatus.ACTIVE
device.reason = ''
self.adapter_agent.update_device(device)
except Exception as e:
self.log.info('get-channel-exception', exc=str(e))
device = self.adapter_agent.get_device(device.id)
device.oper_status = OperStatus.FAILED
device.reason = 'Failed to connect to OLT'
self.adapter_agent.update_device(device)
self.pbc_factory.stopTrying()
reactor.callLater(5, self.activate, device)
return
device = self.adapter_agent.get_device(device.id)
self.log.info('connected-to-olt',
device_id=device.id,
ipv4=device.ipv4_address,
port=self.pbc_port)
reactor.callLater(0, self.heartbeat, device.id, state='start')
yield self.send_set_remote()
yield self.send_connect_olt(0)
yield self.send_activate_olt(0)
# Open the frameio port to receive in-band packet_in messages
self.log.info('registering-frameio')
self.io_port = registry('frameio').open_port(
self.interface, self.rcv_io, is_inband_frame)
# Finally set the initial PM configuration for this device
# TODO: if arrive_onu not working, the following PM stuff was commented out during testing
self.pm_metrics=MapleOltPmMetrics(device)
pm_config = self.pm_metrics.make_proto()
log.info("initial-pm-config", pm_config=pm_config)
self.adapter_agent.update_device_pm_config(pm_config,init=True)
# Apply the PM configuration
self.update_pm_metrics(device, pm_config)
reactor.callLater(1, self.arrive_onu)
self.log.info('olt-activated', device=device)
def rcv_io(self, port, frame):
self.log.info('received', iface_name=port.iface_name,
frame_len=len(frame))
pkt = Ether(frame)
if pkt.haslayer(Dot1Q):
outer_shim = pkt.getlayer(Dot1Q)
if isinstance(outer_shim.payload, Dot1Q):
inner_shim = outer_shim.payload
cvid = inner_shim.vlan
logical_port = cvid
popped_frame = (
Ether(src=pkt.src, dst=pkt.dst, type=inner_shim.type) /
inner_shim.payload
)
kw = dict(
logical_device_id=self.logical_device_id,
logical_port_no=logical_port,
)
self.log.info('sending-packet-in', **kw)
self.adapter_agent.send_packet_in(
packet=str(popped_frame), **kw)
@inlineCallbacks
def update_flow_table(self, flows, device):
self.log.info('bulk-flow-update', device_id=device.id, flows=flows)
def is_downstream(port):
return not is_upstream(port)
def is_upstream(port):
return port == 100 # Need a better way
for flow in flows:
_type = None
_ip_proto = None
_port = None
_vlan_vid = None
_udp_dst = None
_udp_src = None
_ipv4_dst = None
_ipv4_src = None
_metadata = None
_output = None
_push_tpid = None
_field = None
try:
_in_port = fd.get_in_port(flow)
assert _in_port is not None
if is_downstream(_in_port):
self.log.info('downstream-flow')
elif is_upstream(_in_port):
self.log.info('upstream-flow')
else:
raise Exception('port should be 1 or 2 by our convention')
_out_port = fd.get_out_port(flow) # may be None
self.log.info('out-port', out_port=_out_port)
for field in fd.get_ofb_fields(flow):
if field.type == fd.ETH_TYPE:
_type = field.eth_type
self.log.info('field-type-eth-type',
eth_type=_type)
elif field.type == fd.IP_PROTO:
_ip_proto = field.ip_proto
self.log.info('field-type-ip-proto',
ip_proto=_ip_proto)
elif field.type == fd.IN_PORT:
_port = field.port
self.log.info('field-type-in-port',
in_port=_port)
elif field.type == fd.VLAN_VID:
_vlan_vid = field.vlan_vid & 0xfff
self.log.info('field-type-vlan-vid',
vlan=_vlan_vid)
elif field.type == fd.VLAN_PCP:
_vlan_pcp = field.vlan_pcp
self.log.info('field-type-vlan-pcp',
pcp=_vlan_pcp)
elif field.type == fd.UDP_DST:
_udp_dst = field.udp_dst
self.log.info('field-type-udp-dst',
udp_dst=_udp_dst)
elif field.type == fd.UDP_SRC:
_udp_src = field.udp_src
self.log.info('field-type-udp-src',
udp_src=_udp_src)
elif field.type == fd.IPV4_DST:
_ipv4_dst = field.ipv4_dst
self.log.info('field-type-ipv4-dst',
ipv4_dst=_ipv4_dst)
elif field.type == fd.IPV4_SRC:
_ipv4_src = field.ipv4_src
self.log.info('field-type-ipv4-src',
ipv4_dst=_ipv4_src)
elif field.type == fd.METADATA:
_metadata = field.table_metadata
self.log.info('field-type-metadata',
metadata=_metadata)
else:
raise NotImplementedError('field.type={}'.format(
field.type))
for action in fd.get_actions(flow):
if action.type == fd.OUTPUT:
_output = action.output.port
self.log.info('action-type-output',
output=_output, in_port=_in_port)
elif action.type == fd.POP_VLAN:
self.log.info('action-type-pop-vlan',
in_port=_in_port)
elif action.type == fd.PUSH_VLAN:
_push_tpid = action.push.ethertype
log.info('action-type-push-vlan',
push_tpid=_push_tpid, in_port=_in_port)
if action.push.ethertype != 0x8100:
self.log.error('unhandled-tpid',
ethertype=action.push.ethertype)
elif action.type == fd.SET_FIELD:
_field = action.set_field.field.ofb_field
assert (action.set_field.field.oxm_class ==
OFPXMC_OPENFLOW_BASIC)
self.log.info('action-type-set-field',
field=_field, in_port=_in_port)
if _field.type == fd.VLAN_VID:
self.log.info('set-field-type-vlan-vid',
vlan_vid=_field.vlan_vid & 0xfff)
else:
self.log.error('unsupported-action-set-field-type',
field_type=_field.type)
else:
log.error('unsupported-action-type',
action_type=action.type, in_port=_in_port)
if is_upstream(_in_port) and \
(_type == 0x888e or
(_type == 0x800 and (_ip_proto == 2 or _ip_proto == 17))):
yield self.send_config_classifier(0, _type, _ip_proto, _udp_dst)
yield self.send_config_acflow(0, _in_port, _type, _ip_proto, _udp_dst)
except Exception as e:
log.exception('failed-to-install-flow', e=e, flow=flow)
@inlineCallbacks
def send_proxied_message(self, proxy_address, msg):
if isinstance(msg, Packet):
msg = str(msg)
self.log.info('send-proxied-message',
proxy_address=proxy_address.channel_id,
msg=msg)
try:
remote = yield self.get_channel()
yield remote.callRemote("send_omci",
0,
0,
self.get_onu_from_channel_id(proxy_address.channel_id),
msg)
onu, rmsg = yield self.rx_handler.receive_omci_msg()
self.adapter_agent.receive_proxied_message(proxy_address, rmsg)
except Exception as e:
self.log.info('send-proxied_message-exception', exc=str(e))
def packet_out(self, egress_port, msg):
self.log.debug('sending-packet-out',
egress_port=egress_port,
msg_hex=hexify(msg))
pkt = Ether(msg)
out_pkt = (
Ether(src=pkt.src, dst=pkt.dst) /
Dot1Q(vlan=4091) /
Dot1Q(vlan=egress_port, type=pkt.type) /
pkt.payload
)
self.io_port.send(str(out_pkt))
@inlineCallbacks
def update_pm_metrics(self, device, pm_config):
self.log.info('update-pm-metrics', device_id=device.id,
pm_config=pm_config)
remote = yield self.get_channel()
self.pm_metrics.update(device, pm_config, remote)
|
the-stack_0_10216 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import math
import torch
from fairseq.scoring import register_scoring
try:
from fairseq import libbleu
except ImportError as e:
import sys
sys.stderr.write("ERROR: missing libbleu.so. run `pip install --editable .`\n")
raise e
C = ctypes.cdll.LoadLibrary(libbleu.__file__)
class BleuStat(ctypes.Structure):
_fields_ = [
("reflen", ctypes.c_size_t),
("predlen", ctypes.c_size_t),
("match1", ctypes.c_size_t),
("count1", ctypes.c_size_t),
("match2", ctypes.c_size_t),
("count2", ctypes.c_size_t),
("match3", ctypes.c_size_t),
("count3", ctypes.c_size_t),
("match4", ctypes.c_size_t),
("count4", ctypes.c_size_t),
]
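# Comment added for clarity: BleuStat mirrors the statistics record maintained by
# the libbleu C extension (reference/prediction lengths plus matched and total
# n-gram counts for orders 1..4), which C.bleu_add updates in place via ctypes.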
@register_scoring("sacrebleu")
class SacrebleuScorer(object):
def __init__(self, *unused):
import sacrebleu
self.sacrebleu = sacrebleu
self.reset()
def reset(self, one_init=False):
if one_init:
raise NotImplementedError
self.ref = []
self.sys = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.sys.append(pred)
def score(self, order=4):
return self.result_string(order).score
def result_string(self, order=4, tokenize=None):
if order != 4:
raise NotImplementedError
if tokenize:
return self.sacrebleu.corpus_bleu(self.sys, [self.ref], tokenize=tokenize).format()
return self.sacrebleu.corpus_bleu(self.sys, [self.ref]).format()
@register_scoring("bleu")
class Scorer(object):
def __init__(self, pad, eos, unk):
self.stat = BleuStat()
self.pad = pad
self.eos = eos
self.unk = unk
self.reset()
def reset(self, one_init=False):
if one_init:
C.bleu_one_init(ctypes.byref(self.stat))
else:
C.bleu_zero_init(ctypes.byref(self.stat))
def add(self, ref, pred):
if not isinstance(ref, torch.IntTensor):
raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref)))
if not isinstance(pred, torch.IntTensor):
            raise TypeError("pred must be a torch.IntTensor (got {})".format(type(pred)))
# don't match unknown words
rref = ref.clone()
assert not rref.lt(0).any()
rref[rref.eq(self.unk)] = -999
rref = rref.contiguous().view(-1)
pred = pred.contiguous().view(-1)
C.bleu_add(
ctypes.byref(self.stat),
ctypes.c_size_t(rref.size(0)),
ctypes.c_void_p(rref.data_ptr()),
ctypes.c_size_t(pred.size(0)),
ctypes.c_void_p(pred.data_ptr()),
ctypes.c_int(self.pad),
ctypes.c_int(self.eos),
)
def score(self, order=4):
psum = sum(
math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order]
)
return self.brevity() * math.exp(psum / order) * 100
def precision(self):
def ratio(a, b):
return a / b if b > 0 else 0
return [
ratio(self.stat.match1, self.stat.count1),
ratio(self.stat.match2, self.stat.count2),
ratio(self.stat.match3, self.stat.count3),
ratio(self.stat.match4, self.stat.count4),
]
def brevity(self):
r = self.stat.reflen / self.stat.predlen
return min(1, math.exp(1 - r))
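    # Note (added for clarity): score(), precision() and brevity() together
    # implement corpus-level BLEU-4: BLEU = BP * exp((1/4) * sum(log p_n)) * 100
    # over n-gram orders n = 1..4, with brevity penalty
    # BP = min(1, exp(1 - reflen/predlen)).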
def result_string(self, order=4):
assert order <= 4, "BLEU scores for order > 4 aren't supported"
fmt = "BLEU{} = {:2.2f}, {:2.1f}"
for _ in range(1, order):
fmt += "/{:2.1f}"
fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})"
bleup = [p * 100 for p in self.precision()[:order]]
return fmt.format(
order,
self.score(order=order),
*bleup,
self.brevity(),
self.stat.predlen / self.stat.reflen,
self.stat.predlen,
self.stat.reflen
)
|
the-stack_0_10218 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import allowed
class trunk_vlan_classification(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/gigabitethernet/switchport/trunk/trunk-vlan-classification. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__allowed',)
_yang_name = 'trunk-vlan-classification'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__allowed = YANGDynClass(base=allowed.allowed, is_container='container', presence=False, yang_name="allowed", rest_name="allowed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the VLANs that will Xmit/Rx through the Layer2\ninterface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'gigabitethernet', u'switchport', u'trunk', u'trunk-vlan-classification']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'GigabitEthernet', u'switchport', u'trunk']
def _get_allowed(self):
"""
Getter method for allowed, mapped from YANG variable /interface/gigabitethernet/switchport/trunk/trunk_vlan_classification/allowed (container)
YANG Description: Set the VLANs that will Xmit/Rx through the Layer2
interface
"""
return self.__allowed
def _set_allowed(self, v, load=False):
"""
Setter method for allowed, mapped from YANG variable /interface/gigabitethernet/switchport/trunk/trunk_vlan_classification/allowed (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_allowed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_allowed() directly.
YANG Description: Set the VLANs that will Xmit/Rx through the Layer2
interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=allowed.allowed, is_container='container', presence=False, yang_name="allowed", rest_name="allowed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the VLANs that will Xmit/Rx through the Layer2\ninterface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """allowed must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=allowed.allowed, is_container='container', presence=False, yang_name="allowed", rest_name="allowed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the VLANs that will Xmit/Rx through the Layer2\ninterface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__allowed = t
if hasattr(self, '_set'):
self._set()
def _unset_allowed(self):
self.__allowed = YANGDynClass(base=allowed.allowed, is_container='container', presence=False, yang_name="allowed", rest_name="allowed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the VLANs that will Xmit/Rx through the Layer2\ninterface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
allowed = __builtin__.property(_get_allowed, _set_allowed)
_pyangbind_elements = {'allowed': allowed, }
|
the-stack_0_10219 | """Binaries"""
from __future__ import print_function
from collections import defaultdict
import sys
def print_table(rows, headers=None, space_between_columns=4):
"""
Convenience method for printing a list of dictionary objects into a table. Automatically sizes the
columns to be the maximum size of any entry in the dictionary, and adds additional buffer whitespace.
Params:
rows - A list of dictionaries representing a table of information, where keys are the
headers of the table. Ex. { 'Name': 'John', 'Age': 23 }
headers - A list of the headers to print for the table. Must be a subset of the keys of
                  the dictionaries that compose the rows. If a header isn't present or its
                  value is falsey, the value printed is '-'.
space_between_columns - The amount of space between the columns of text. Defaults to 4.
"""
columns_to_sizing = defaultdict(int)
format_string = ''
headers = headers or rows[0].keys()
for row in rows:
for header in headers:
value = row.get(header, '-')
columns_to_sizing[header] = max(len(str(value)), columns_to_sizing[header])
for header in headers:
column_size = max(columns_to_sizing[header], len(header)) + space_between_columns
format_string += '{' + header + ':<' + str(column_size) + '}'
print(format_string.format(**{key: key for key in headers}), file=sys.stderr)
for row in rows:
defaulted_row = {header: row.get(header) or '-' for header in headers}
print(format_string.format(**defaulted_row))
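# Minimal usage sketch (the sample rows below are made up for illustration):
#
#   rows = [{'Name': 'John', 'Age': 23}, {'Name': 'Asha'}]
#   print_table(rows, headers=['Name', 'Age'])
#
# The header line is written to stderr and each data row to stdout, with '-'
# printed for the missing 'Age' in the second row.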
|
the-stack_0_10220 | """
This script runs the application using a development server.
"""
import bottle
import os
import sys
# routes contains the HTTP handlers for our server and must be imported.
import routes
import src.emotion_bottle
if '--debug' in sys.argv[1:] or 'SERVER_DEBUG' in os.environ:
# Debug mode will enable more verbose output in the console window.
# It must be set at the beginning of the script.
bottle.debug(True)
def wsgi_app():
"""Returns the application to make available through wfastcgi. This is used
when the site is published to Microsoft Azure."""
return bottle.default_app()
if __name__ == '__main__':
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static').replace('\\', '/')
HOST = os.environ.get('SERVER_HOST', 'localhost')
try:
PORT = int(os.environ.get('SERVER_PORT', '5555'))
except ValueError:
PORT = 5555
@bottle.route('/static/<filepath:path>')
def server_static(filepath):
"""Handler for static files, used with the development server.
When running under a production server such as IIS or Apache,
the server should be configured to serve the static files."""
return bottle.static_file(filepath, root=STATIC_ROOT)
# Starts a local test server.
bottle.run(server='wsgiref', host=HOST, port=PORT)
|
the-stack_0_10222 | """
Module for generating Arc Line lists
Should be run where it is located (for now)
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import os
import pdb
import datetime
from pkg_resources import resource_filename
from collections import OrderedDict
from astropy.table import Table, Column
line_path = resource_filename('pypeit', '/data/arc_lines/lists/')
nist_path = resource_filename('pypeit', '/data/arc_lines/NIST/')
def parser(options=None):
import argparse
# Parse
parsefunc = argparse.ArgumentParser(
description='Build the PypeIt line lists from NIST tables')
parsefunc.add_argument("-w", "--write", default=False, action='store_true', help="Actually write files?")
parsefunc.add_argument("--skip_stop", default=False, action='store_true', help="Skip warning stop?")
parsefunc.add_argument("-r", "--relint", type=float, default=1000.0, help="Set the relative intensity threshold")
parsefunc.add_argument("line", default='', help="Name of ion")
if options is None:
args = parsefunc.parse_args()
else:
args = parsefunc.parse_args(options)
return args
def init_line_list():
""" Initialize a Table for a linelist
Rigidly enforces table column formats
Strings are the most annoying
Returns
-------
init_tbl : Table
One dummy row
"""
dummy_src = str('#')*50
# Arc Line name
dummy_line = str('#')*8
#
# Dict for Table
idict = OrderedDict()
idict['ion'] = dummy_line
idict['wave'] = 0.
idict['NIST'] = 0
idict['Instr'] = 0 # Flag for instrument
idict['amplitude'] = 0
idict['Source'] = dummy_src
# Table
tkeys = idict.keys()
lst = [[idict[tkey]] for tkey in tkeys]
init_tbl = Table(lst, names=tkeys)
# Return
return init_tbl
def load_line_list(line):
"""
Parameters
----------
line : str
Name of ion
Returns
-------
line_list : Table
"""
line_file = nist_path + '{:s}_vacuum.ascii'.format(line)
# Check the NIST lines file exists
if not os.path.isfile(line_file):
raise IOError("Input line {:s} is not available".format(line))
line_list = Table.read(line_file, format='ascii.fixed_width', comment='#')
# Remove unwanted columns
tkeys = line_list.keys()
for badkey in ['Ritz', 'Acc.', 'Type', 'Ei', 'Lower', 'Upper', 'TP', 'Line']:
for tkey in tkeys:
if badkey in tkey:
line_list.remove_column(tkey)
# Relative intensity -- Strip junk off the end
reli = []
for imsk, idat in zip(line_list['Rel.'].mask, line_list['Rel.'].data):
if imsk:
reli.append(0.)
else:
try:
reli.append(float(idat))
except ValueError:
try:
reli.append(float(idat[:-1]))
except ValueError:
reli.append(0.)
line_list.remove_column('Rel.')
line_list['Rel.'] = reli
#
gdrows = line_list['Observed'] > 0. # Eliminate dummy lines
line_list = line_list[gdrows]
line_list.rename_column('Observed', 'wave')
# Others
# Grab ion name
i0 = line_file.rfind('/')
i1 = line_file.rfind('_')
ion = line_file[i0+1:i1]
line_list.add_column(Column([ion]*len(line_list), name='Ion', dtype='U5'))
line_list.add_column(Column([1]*len(line_list), name='NIST'))
return line_list
def main(args=None):
""" This script convert an input NIST table into a line list that can be used by PypeIt
Parameters
----------
args
Returns
-------
"""
# Grab arguments
pargs = parser(options=args)
line = pargs.line
relIntThreshold = pargs.relint
print("=============================================================")
print("This script is for EXPERTS ONLY")
print("Continue only if you know what you are doing")
print("Otherwise exit")
print("p.s. You need to remove the files you wish to re-build")
print("=============================================================")
if not pargs.skip_stop:
pdb.set_trace()
# Load the NIST ThAr list
llist = load_line_list(line)
# ['wave', 'Aki', 'Rel.', 'Ion', 'NIST']
# Generate a table
linelist = init_line_list()
# now add all NIST lines
nlines = llist['Ion'].size
for ll in range(nlines):
if llist['Rel.'][ll] > relIntThreshold:
linelist.add_row([llist['Ion'][ll], llist['wave'][ll], 1, 0, llist['Rel.'][ll], 'NIST'])
        if (ll + 1) % 100 == 0:
            print(ll + 1, '/', nlines)
# Remove the first dummy row
linelist.remove_row(0)
# Finally, sort the list by increasing wavelength
linelist.sort('wave')
# Write?
if not pargs.write:
print("=============================================================")
print("Rerun with --write if you are happy with what you see.")
print("=============================================================")
return
# Write the table to disk
outfile = line_path + '{:s}_lines.dat'.format(line)
write_line_list(linelist, outfile)
return
def write_line_list(tbl, outfile):
"""
Parameters
----------
tbl
outfile
"""
# Format
tbl['wave'].format = '10.4f'
# Write
with open(outfile, 'w') as f:
f.write('# Creation Date: {:s}\n'.format(str(datetime.date.today().strftime('%Y-%m-%d'))))
tbl.write(f, format='ascii.fixed_width')
if __name__ == '__main__':
main()
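# Example invocation (the script filename below is a placeholder; requires the PypeIt
# NIST tables to be installed alongside this module):
#   python build_line_list.py ThAr --relint 500 --skip_stop --write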
|
the-stack_0_10223 | #!/usr/bin/env python
from tools.load import LoadMatrix
import numpy as np
lm=LoadMatrix()
traindat = np.ushort(lm.load_numbers('../data/fm_train_word.dat'))
testdat = np.ushort(lm.load_numbers('../data/fm_test_word.dat'))
parameter_list=[[traindat,testdat,1.2],[traindat,testdat,1.2]]
def kernel_linear_word (fm_train_word=traindat,fm_test_word=testdat,scale=1.2):
import shogun as sg
feats_train=sg.create_features(fm_train_word)
feats_test=sg.create_features(fm_test_word)
kernel=sg.create_kernel("LinearKernel")
kernel.init(feats_train, feats_train)
kernel.set_normalizer(sg.create_kernel_normalizer("AvgDiagKernelNormalizer", scale=scale))
kernel.init(feats_train, feats_train)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return kernel
if __name__=='__main__':
print('LinearWord')
kernel_linear_word(*parameter_list[0])
|
the-stack_0_10224 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
'''
Utility to discover the IP address assigned to a network interface (e.g. wlan0).
'''
import socket
import fcntl
import sys
def ifconfig(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
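        # 0x8915 is SIOCGIFADDR: ask the kernel for the IPv4 address bound to ifname.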
        result = fcntl.ioctl(s.fileno(), 0x8915, (ifname + '\0' * 32)[:32].encode())
except IOError:
return None
return socket.inet_ntoa(result[20:24])
if __name__ == '__main__':
print (ifconfig(sys.argv[1]))
|
the-stack_0_10225 | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import nltk
from nltk import word_tokenize
import json
import tensorrt as trt
def preprocess(text):
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
tokens = word_tokenize(text)
# split into lower-case word tokens, in numpy array with shape of (seq, 1)
words = np.asarray([w.lower() for w in tokens]).reshape(-1, 1)
# split words into chars, in numpy array with shape of (seq, 1, 1, 16)
chars = [[c for c in t][:16] for t in tokens]
chars = [cs+['']*(16-len(cs)) for cs in chars]
chars = np.asarray(chars).reshape(-1, 1, 1, 16)
return words, chars
def get_map_func(filepath):
file = open(filepath)
category_map = json.load(file)
category_mapper = dict(zip(category_map["cats_strings"], category_map["cats_int64s"]))
default_int64 = category_map["default_int64"]
func = lambda s: category_mapper.get(s, default_int64)
return np.vectorize(func)
def get_inputs(context, query):
cw, cc = preprocess(context)
qw, qc = preprocess(query)
context_word_func = get_map_func("CategoryMapper_4.json")
context_char_func = get_map_func("CategoryMapper_5.json")
query_word_func = get_map_func("CategoryMapper_6.json")
query_char_func = get_map_func("CategoryMapper_7.json")
cw_input = context_word_func(cw).astype(trt.nptype(trt.int32)).ravel()
cc_input = context_char_func(cc).astype(trt.nptype(trt.int32)).ravel()
qw_input = query_word_func(qw).astype(trt.nptype(trt.int32)).ravel()
qc_input = query_char_func(qc).astype(trt.nptype(trt.int32)).ravel()
return cw_input, cc_input, qw_input, qc_input
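# Minimal usage sketch (assumes TensorRT and the CategoryMapper_*.json vocabulary files
# referenced above are available next to this module); the sample context and query
# strings below are placeholders.
if __name__ == "__main__":
    sample_context = "TensorRT is an SDK for high-performance deep learning inference."
    sample_query = "What is TensorRT?"
    cw, cc, qw, qc = get_inputs(sample_context, sample_query)
    print("context words:", cw.shape, "context chars:", cc.shape)
    print("query words:", qw.shape, "query chars:", qc.shape)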
|
the-stack_0_10226 | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="scatterternary.marker", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
the-stack_0_10227 | from flask.cli import FlaskGroup
from lps import create_app, db
from lps.models import *
from lps.seeds import seed_database, export_seed
from lps.mail_utils import send_alert_mail
app = create_app()
cli = FlaskGroup(create_app=create_app)
# DATABASE COMMANDS
@cli.command("seed_db")
def seed_db():
print("======== STARTING DATABASE SEED ========")
seed_database(db)
print("======== SEED COMPLETED ========")
@cli.command("reset_db")
def reset_db():
LocatorPoint.query.delete()
Unit.query.delete()
ApiKey.query.delete()
User.query.delete()
db.session.commit()
print("======== RESET DATABASE ========")
@cli.command("export_db")
def export_db():
print("======== EXPORTING DATABASE SEED ========")
export_seed()
print("======== EXPORT COMPLETED ========")
# MAIL SERVER COMMANDS
@cli.command("test_mail")
def test_mail():
send_alert_mail("")
if __name__ == '__main__':
    cli()
|
the-stack_0_10228 | # -*- coding: utf-8 -*-
"""column filter"""
__all__ = ['Filter', 'FilterType']
import abc
import enum
import pandas as pd
import re
from .default import ValueFetcher
from .type import basic_column_type
from pyqttable import const
from typing import List, Optional, Any
class FilterType(enum.Enum):
"""Column filter type"""
Exact = 'exact'
Contain = 'contain'
Regex = 'regex'
Expression = 'expression'
MultipleChoice = 'multiple_choice'
class Filter(metaclass=abc.ABCMeta):
"""
Column filter, including:
- filter type
- filter widget info
- filter function
"""
# Placeholder text for filter widget
PlaceHolderText = ''
def __init__(self, filter_type):
self.type = filter_type
@classmethod
def make(cls, fetcher: ValueFetcher):
"""Make Filter from ValueFetcher"""
filter_type = fetcher.get('filter_type')
# If filter_type is already Filter, just return
if isinstance(filter_type, cls):
return filter_type
# Convert filter_type to enum
try:
filter_type = FilterType(filter_type)
except Exception as e:
_ = e
else:
# Make Filter instance according to FilterType
if filter_type == FilterType.Exact:
return ExactFilter(filter_type)
elif filter_type == FilterType.Contain:
return ContainFilter(filter_type)
elif filter_type == FilterType.Regex:
return RegexFilter(filter_type)
elif filter_type == FilterType.Expression:
return ExpressionFilter(filter_type)
elif filter_type == FilterType.MultipleChoice:
return MultipleChoice(filter_type)
# If FilterType is invalid, raise error
raise TypeError(f'invalid filter type \'{filter_type}\'')
def filter(self, df: pd.DataFrame, by: str, filter_value: Any,
to_string: Optional[callable] = None,
to_value: Optional[callable] = None) -> pd.DataFrame:
"""
Filter DataFrame
Parameters
----------
df: input DataFrame to be filtered
by: column key to do filtering
filter_value: current value passed by filter widget
to_string: function to convert data from original format to string
to_value: function to convert data from string to original format
Returns
-------
Filtered DataFrame
"""
kwargs = dict(filter_value=filter_value, to_string=to_string, to_value=to_value)
return df[df[by].apply(self._filter_apply, **kwargs)].copy()
def _filter_apply(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if self.common_filter(content, filter_value):
return True
try:
return self.filter_each(content, filter_value, to_string, to_value)
except Exception as e:
_ = e
return False
@staticmethod
def common_filter(content: Any, filter_value: Any) -> bool:
"""Common filter for all kinds of Filters"""
if isinstance(filter_value, str):
if filter_value == '#blank':
return False if content else True
elif filter_value == '#non-blank':
return True if content else False
return False
@abc.abstractmethod
def filter_each(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
"""
Method to filter each value
Parameters
----------
content: cell data to be filtered
filter_value: current value passed by filter widget
to_string: function to convert data from original format to string
to_value: function to convert data from string to original format
Returns
-------
Remain in result or not
"""
...
class ExactFilter(Filter):
"""Perfect match filter"""
PlaceHolderText = 'Exact'
def filter_each(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if isinstance(filter_value, str):
return to_string(content) == filter_value
else:
return content == filter_value
class ContainFilter(Filter):
"""Contain filter"""
PlaceHolderText = 'Contain'
def filter_each(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if isinstance(filter_value, str):
return to_string(content).find(filter_value) > -1
else:
return False
class RegexFilter(Filter):
"""Filtered by regex expression"""
PlaceHolderText = 'Regex'
def filter_each(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if isinstance(filter_value, str):
return True if re.findall(filter_value, to_string(content)) else False
else:
return False
class ExpressionFilter(Filter):
"""Filtered by python expression"""
PlaceHolderText = 'Express'
def filter_each(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if isinstance(filter_value, str):
if not isinstance(content, tuple(basic_column_type)):
content = to_string(content)
expression = f'{content!r} {filter_value}'
try:
res = eval(expression)
except Exception as e:
_ = e
return False
else:
return False if res is False else True
else:
return False
class MultipleChoice(Filter):
"""Filter with multiple choices"""
PlaceHolderText = 'Multi'
Delimiter = const.DefaultDelimiter
def filter_each(self, content: str, filter_value: str,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if isinstance(filter_value, str):
filter_list = filter_value.split(self.Delimiter)
return to_string(content) in filter_list
else:
return False
if __name__ == '__main__':
pass
|
the-stack_0_10229 | # Copyright (C) 2020 TeamUltroid
# Ported by X_ImFine
# Recode by @mrismanaziz
# RecodeV2 by @PacarFerdilla
import asyncio
import os
from datetime import datetime
from telethon import events
from telethon.tl import functions, types
from userbot.events import register
from userbot import ( # noqa pylint: disable=unused-import isort:skip
AFKREASON,
ALIVE_NAME,
BOTLOG,
BOTLOG_CHATID,
CMD_HELP,
COUNT_MSG,
ISAFK,
PM_AUTO_BAN,
USERS,
bot,
)
global USER_AFK
global afk_time
global last_afk_message
global last_afk_msg
global afk_start
global afk_end
USER_AFK = {}
afk_time = None
last_afk_message = {}
last_afk_msg = {}
afk_start = {}
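# Outgoing-message handler: any outgoing (or edited) message that does not contain
# "afk" clears the AFK state and announces how long the owner was away.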
@bot.on(events.NewMessage(outgoing=True))
@bot.on(events.MessageEdited(outgoing=True))
async def set_not_afk(event):
global USER_AFK
global afk_time
global last_afk_message
global afk_start
global afk_end
back_alive = datetime.now()
afk_end = back_alive.replace(microsecond=0)
if afk_start != {}:
total_afk_time = str((afk_end - afk_start))
current_message = event.message.message
if "afk" not in current_message and "yes" in USER_AFK:
try:
if pic.endswith((".tgs", ".webp")):
shite = await bot.send_message(event.chat_id, file=pic)
shites = await bot.send_message(
event.chat_id,
f"🔥 {ALIVE_NAME} __**Sudah Kembali Online...**__\n**Sejak :** `{total_afk_time}` **Yang Lalu**",
)
else:
shite = await bot.send_message(
event.chat_id,
f"🔥 __**Sudah Kembali Online...**__\n**Ada Sejak :** `{total_afk_time}` **Yang Lalu**",
file=pic,
)
except BaseException:
shite = await bot.send_message(
event.chat_id,
f"🔥 __**Sudah Kembali Online...**__\n**Kembali Chat Sejak :** `{total_afk_time}` **Yang Lalu**",
)
except BaseException:
pass
await asyncio.sleep(6)
await shite.delete()
try:
await shites.delete()
except BaseException:
pass
USER_AFK = {}
afk_time = None
os.system("rm -rf *.webp")
os.system("rm -rf *.mp4")
os.system("rm -rf *.tgs")
os.system("rm -rf *.png")
os.system("rm -rf *.jpg")
@bot.on(events.NewMessage(incoming=True,
func=lambda e: bool(e.mentioned or e.is_private)))
async def on_afk(event):
if event.fwd_from:
return
global USER_AFK
global afk_time
global last_afk_message
global afk_start
global afk_end
back_alivee = datetime.now()
afk_end = back_alivee.replace(microsecond=0)
if afk_start != {}:
total_afk_time = str((afk_end - afk_start))
current_message_text = event.message.message.lower()
if "afk" in current_message_text:
return False
if USER_AFK and not (await event.get_sender()).bot:
msg = None
if reason:
message_to_reply = (
f"**{ALIVE_NAME} Sedang AFK**\n\n**Sejak :** `{total_afk_time}` **Yang Lalu**\n" +
f"**Karena :** `{reason}`")
else:
message_to_reply = f"**Maaf King {ALIVE_NAME} Sedang AFK**\n\n**Sejak :** `{total_afk_time}` **Yang Lalu**"
try:
if pic.endswith((".tgs", ".webp")):
msg = await event.reply(file=pic)
msgs = await event.reply(message_to_reply)
else:
msg = await event.reply(message_to_reply, file=pic)
except BaseException:
msg = await event.reply(message_to_reply)
await asyncio.sleep(2.5)
if event.chat_id in last_afk_message:
await last_afk_message[event.chat_id].delete()
try:
if event.chat_id in last_afk_msg:
await last_afk_msg[event.chat_id].delete()
except BaseException:
pass
last_afk_message[event.chat_id] = msg
try:
if msgs:
last_afk_msg[event.chat_id] = msgs
except BaseException:
pass
@register(
outgoing=True, pattern=r"^\.afk(?: |$)(.*)", disable_errors=True
) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
reply = await event.get_reply_message()
global USER_AFK
global afk_time
global last_afk_message
global last_afk_msg
global afk_start
global afk_end
global reason
global pic
USER_AFK = {}
afk_time = None
last_afk_message = {}
last_afk_msg = {}
afk_end = {}
start_1 = datetime.now()
afk_start = start_1.replace(microsecond=0)
reason = event.pattern_match.group(1)
if reply:
pic = await event.client.download_media(reply)
else:
pic = None
if not USER_AFK:
last_seen_status = await bot(
functions.account.GetPrivacyRequest(types.InputPrivacyKeyStatusTimestamp())
)
if isinstance(last_seen_status.rules, types.PrivacyValueAllowAll):
afk_time = datetime.datetime.now()
USER_AFK = f"yes : {reason} {pic}"
if reason:
try:
if pic.endswith((".tgs", ".webp")):
await bot.send_message(event.chat_id, file=pic)
await bot.send_message(
event.chat_id,
f"**King {ALIVE_NAME} Telah AFK**\n**Karena :** `{reason}`",
)
else:
await bot.send_message(
event.chat_id,
f"**King {ALIVE_NAME} Telah AFK**\n**Karena :** `{reason}`",
file=pic,
)
except BaseException:
await bot.send_message(
event.chat_id,
f"**King {ALIVE_NAME} Telah AFK**\n**Karena :** `{reason}`",
)
else:
try:
if pic.endswith((".tgs", ".webp")):
await bot.send_message(event.chat_id, file=pic)
await bot.send_message(
event.chat_id, f"**King {ALIVE_NAME} Telah AFK...**"
)
else:
await bot.send_message(
event.chat_id,
f"**King {ALIVE_NAME} Telah AFK...**",
file=pic,
)
except BaseException:
await bot.send_message(
event.chat_id, f"**King {ALIVE_NAME} Telah AFK...**"
)
await event.delete()
try:
if reason and pic:
if pic.endswith((".tgs", ".webp")):
await bot.send_message(BOTLOG_CHATID, file=pic)
await bot.send_message(
BOTLOG_CHATID,
f"#AFK\n**{ALIVE_NAME} Telah AFK**\n**Karena :** `{reason}`",
)
else:
await bot.send_message(
BOTLOG_CHATID,
f"#AFK\n**{ALIVE_NAME} Sedang AFK**\n**Karena :** `{reason}`",
file=pic,
)
elif reason:
await bot.send_message(
BOTLOG_CHATID,
f"#AFK\n**{ALIVE_NAME} Sedang AFK**\n**Karena :** `{reason}`",
)
elif pic:
if pic.endswith((".tgs", ".webp")):
await bot.send_message(BOTLOG_CHATID, file=pic)
await bot.send_message(
BOTLOG_CHATID, f"#AFK\n**{ALIVE_NAME} Telah AFK**"
)
else:
await bot.send_message(
BOTLOG_CHATID,
f"#AFK\n**{ALIVE_NAME} Sedang AFK**",
file=pic,
)
else:
await bot.send_message(
BOTLOG_CHATID, f"#AFK\n**{ALIVE_NAME} Masih aja AFK**"
)
except Exception as e:
        # Could not post the AFK notice to the log channel; report the error and move on.
        print(str(e))
CMD_HELP.update(
{
"afk": "**✘ Plugin : **`afk`\
\n\n • **Perintah :** `.afk` <alasan> bisa <sambil reply sticker/foto/gif/media>\
\n • **Function : **Memberi tahu kalau King sedang afk bisa dengan menampilkan media keren ketika seseorang menandai atau membalas salah satu pesan atau dm Anda\
\n\n • **Notes :** __Bila ada orang spam berlebihan ke Anda , tinggal ketik__ `.block`\
"
}
)
|
the-stack_0_10231 | from django.test import TestCase
from django_ses.views import (emails_parse, stats_to_list, quota_parse,
sum_stats)
# Mock of what boto's SESConnection.get_send_statistics() returns
STATS_DICT = {
u'SendDataPoints': [
{
u'Bounces': u'1',
u'Complaints': u'0',
u'DeliveryAttempts': u'11',
u'Rejects': u'0',
u'Timestamp': u'2011-02-28T13:50:00Z',
},
{
u'Bounces': u'1',
u'Complaints': u'0',
u'DeliveryAttempts': u'3',
u'Rejects': u'0',
u'Timestamp': u'2011-02-24T23:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'2',
u'DeliveryAttempts': u'8',
u'Rejects': u'0',
u'Timestamp': u'2011-02-24T16:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'2',
u'DeliveryAttempts': u'33',
u'Rejects': u'0',
u'Timestamp': u'2011-02-25T20:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'3',
u'Rejects': u'3',
u'Timestamp': u'2011-02-28T23:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'2',
u'Rejects': u'3',
u'Timestamp': u'2011-02-25T22:50:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'6',
u'Rejects': u'0',
u'Timestamp': u'2011-03-01T13:20:00Z',
},
],
}
QUOTA_DICT = {
u'GetSendQuotaResponse': {
u'GetSendQuotaResult': {
u'Max24HourSend': u'10000.0',
u'MaxSendRate': u'5.0',
u'SentLast24Hours': u'1677.0'
},
u'ResponseMetadata': {
u'RequestId': u'8f100233-44e7-11e0-a926-a198963635d8'
}
}
}
VERIFIED_EMAIL_DICT = {
u'ListVerifiedEmailAddressesResponse': {
u'ListVerifiedEmailAddressesResult': {
u'VerifiedEmailAddresses': [
u'[email protected]',
u'[email protected]',
u'[email protected]'
]
},
u'ResponseMetadata': {
u'RequestId': u'9afe9c18-44ed-11e0-802a-25a1a14c5a6e'
}
}
}
class StatParsingTest(TestCase):
def setUp(self):
self.stats_dict = STATS_DICT
self.quota_dict = QUOTA_DICT
self.emails_dict = VERIFIED_EMAIL_DICT
def test_stat_to_list(self):
expected_list = [
{
u'Bounces': u'0',
u'Complaints': u'2',
u'DeliveryAttempts': u'8',
u'Rejects': u'0',
u'Timestamp': u'2011-02-24T16:35:00Z',
},
{
u'Bounces': u'1',
u'Complaints': u'0',
u'DeliveryAttempts': u'3',
u'Rejects': u'0',
u'Timestamp': u'2011-02-24T23:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'2',
u'DeliveryAttempts': u'33',
u'Rejects': u'0',
u'Timestamp': u'2011-02-25T20:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'2',
u'Rejects': u'3',
u'Timestamp': u'2011-02-25T22:50:00Z',
},
{
u'Bounces': u'1',
u'Complaints': u'0',
u'DeliveryAttempts': u'11',
u'Rejects': u'0',
u'Timestamp': u'2011-02-28T13:50:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'3',
u'Rejects': u'3',
u'Timestamp': u'2011-02-28T23:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'6',
u'Rejects': u'0',
u'Timestamp': u'2011-03-01T13:20:00Z',
},
]
actual = stats_to_list(self.stats_dict, localize=False)
self.assertEqual(len(actual), len(expected_list))
self.assertEqual(actual, expected_list)
def test_quota_parse(self):
expected = {
u'Max24HourSend': u'10000.0',
u'MaxSendRate': u'5.0',
u'SentLast24Hours': u'1677.0',
}
actual = quota_parse(self.quota_dict)
self.assertEqual(actual, expected)
def test_emails_parse(self):
expected_list = [
u'[email protected]',
u'[email protected]',
u'[email protected]',
]
actual = emails_parse(self.emails_dict)
self.assertEqual(len(actual), len(expected_list))
self.assertEqual(actual, expected_list)
def test_sum_stats(self):
expected = {
'Bounces': 2,
'Complaints': 4,
'DeliveryAttempts': 66,
'Rejects': 6,
}
stats = stats_to_list(self.stats_dict)
actual = sum_stats(stats)
self.assertEqual(actual, expected)
|
the-stack_0_10232 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, missing-docstring, no-else-return
"""Unit tests for the Relay VM serialization and deserialization."""
import pytest
import numpy as np
import tvm
from tvm.runtime import vm as _vm
from tvm.relay import vm as rly_vm
from tvm import relay
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay import transform
from tvm.relay.prelude import Prelude
from tvm.contrib import utils
from tvm.relay import testing
def create_exec(f, target="llvm", params=None):
if isinstance(f, relay.Expr):
mod = tvm.IRModule()
mod["main"] = f
executable = rly_vm.compile(mod, target=target, params=params)
return executable
else:
assert isinstance(f, tvm.IRModule), "expected mod as tvm.IRModule"
executable = rly_vm.compile(f, target=target, params=params)
return executable
def get_serialized_output(mod, *data, params=None, target="llvm", device=tvm.cpu()):
exe = create_exec(mod, target, params=params)
code, lib = exe.save()
des_exec = _vm.Executable.load_exec(code, lib)
des_vm = _vm.VirtualMachine(des_exec, device)
result = des_vm.run(*data)
return result
def run_network(mod, params, dtype="float32"):
def get_vm_output(mod, data, params, target, device, dtype="float32"):
result = relay.create_executor("vm", mod=mod, device=device).evaluate()(data, **params)
return result.numpy().astype(dtype)
data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
data = np.random.uniform(size=data_shape).astype(dtype)
target = "llvm"
dev = tvm.cpu(0)
tvm_out = get_vm_output(mod, tvm.nd.array(data.astype(dtype)), params, target, dev, dtype)
vm_out = get_serialized_output(
mod, tvm.nd.array(data.astype(dtype)), params=params, target=target, device=dev
)
tvm.testing.assert_allclose(vm_out.numpy().astype(dtype), tvm_out, rtol=1e-5, atol=1e-5)
def test_serializer():
mod = tvm.IRModule({})
a = relay.const(1.0, "float32")
x = relay.var("x", shape=(10, 10), dtype="float32")
f1 = relay.Function([x], x + a)
glb_f1 = relay.GlobalVar("f1")
mod[glb_f1] = f1
# TODO(@jroesch): look into optimizing away the need to do this
mod = transform.InferType()(mod)
b = relay.const(2.0, "float32")
y = relay.var("y", shape=(10, 10), dtype="float32")
f2 = relay.Function([y], y - b)
glb_f2 = relay.GlobalVar("f2")
mod[glb_f2] = f2
# TODO(@jroesch): look into optimizing away the need to do this
mod = transform.InferType()(mod)
x1 = relay.var("x1", shape=(10, 10), dtype="float32")
y1 = relay.var("y1", shape=(10, 10), dtype="float32")
main = relay.Function([x1, y1], glb_f1(x1) * glb_f2(y1))
mod["main"] = main
exe = create_exec(mod)
glbs = exe.globals
assert len(glbs) == 3
assert "f1" in glbs
assert "f2" in glbs
assert "main" in glbs
prim_ops = exe.primitive_ops
assert any(item.startswith("vm_mod_fused_add") for item in prim_ops)
assert any(item.startswith("vm_mod_fused_subtract") for item in prim_ops)
assert any(item.startswith("vm_mod_fused_multiply") for item in prim_ops)
code = exe.bytecode
assert "main(x1, y1)" in code
assert "f1(x)" in code
assert "f2(y)" in code
code, lib = exe.save()
assert isinstance(code, bytearray)
assert isinstance(lib, tvm.runtime.Module)
def test_save_load():
x = relay.var("x", shape=(10, 10))
f = relay.Function([x], x + x)
x_data = np.random.rand(10, 10).astype("float32")
# serialize.
vm = create_exec(f)
code, lib = vm.save()
assert isinstance(code, bytearray)
# save and load the code and lib file.
tmp = utils.tempdir()
path_lib = tmp.relpath("lib.so")
lib.export_library(path_lib)
with open(tmp.relpath("code.ro"), "wb") as fo:
fo.write(code)
loaded_lib = tvm.runtime.load_module(path_lib)
loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read())
# deserialize.
des_exec = _vm.Executable.load_exec(loaded_code, loaded_lib)
des_vm = _vm.VirtualMachine(des_exec, tvm.cpu())
res = des_vm.run(x_data)
tvm.testing.assert_allclose(res.numpy(), x_data + x_data)
def test_const():
c = relay.const(1.0, "float32")
x = relay.var("x", shape=(10, 10), dtype="float32")
f = relay.Function([x], x + c)
x_data = np.random.rand(10, 10).astype("float32")
res = get_serialized_output(f, x_data)
tvm.testing.assert_allclose(res.numpy(), x_data + 1)
def test_if():
x = relay.var("x", shape=(10, 10))
y = relay.var("y", shape=(10, 10))
equal = relay.op.equal(x, y)
equal = relay.op.nn.batch_flatten(equal)
f = relay.Function([x, y], relay.If(relay.op.min(equal, axis=[0, 1]), x, y))
x_data = np.random.rand(10, 10).astype("float32")
y_data = np.random.rand(10, 10).astype("float32")
# same
res = get_serialized_output(f, x_data, x_data)
tvm.testing.assert_allclose(res.numpy(), x_data)
# diff
res = get_serialized_output(f, x_data, y_data)
tvm.testing.assert_allclose(res.numpy(), y_data)
def test_loop():
mod = tvm.IRModule({})
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", shape=[], dtype="int32")
accum = relay.var("accum", shape=[], dtype="int32")
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, "int32"))):
sb.ret(accum)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, "int32"))
new_accum = relay.add(accum, i)
sb.ret(relay.Call(sum_up, [one_less, new_accum]))
func = relay.Function([i, accum], sb.get())
mod[sum_up] = func
mod = transform.InferType()(mod)
loop_bound = 0
i_data = np.array(loop_bound, dtype="int32")
accum_data = np.array(0, dtype="int32")
iarg = relay.var("i", shape=[], dtype="int32")
aarg = relay.var("accum", shape=[], dtype="int32")
mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
result = get_serialized_output(mod, i_data, accum_data)
tvm.testing.assert_allclose(result.numpy(), sum(range(1, loop_bound + 1)))
def test_tuple():
ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
tup = relay.var("tup", type_annotation=ttype)
f = relay.Function([tup], relay.TupleGetItem(tup, 1))
i_data = np.random.rand(41).astype("float32")
j_data = np.random.rand(10).astype("float32")
result = get_serialized_output(f, (i_data, j_data))
tvm.testing.assert_allclose(result.numpy(), j_data)
def test_adt_list():
mod = tvm.IRModule()
p = Prelude(mod)
_, cons, nil = mod.get_type("List")
l1 = cons(relay.const(1), nil())
l21 = cons(relay.const(2), l1)
l321 = cons(relay.const(3), l21)
f = relay.Function([], l321)
mod["main"] = f
result = get_serialized_output(mod)
assert len(result) == 2
assert len(result[1]) == 2
assert len(result[1][1]) == 2
res = []
res.append(result[0].numpy().tolist())
res.append(result[1][0].numpy().tolist())
res.append(result[1][1][0].numpy().tolist())
tvm.testing.assert_allclose(res, np.array([3, 2, 1]))
def test_adt_compose():
mod = tvm.IRModule()
p = Prelude(mod)
compose = mod.get_global_var("compose")
# add_one = fun x -> x + 1
sb = relay.ScopeBuilder()
x = relay.var("x", "float32")
x1 = sb.let("x1", x)
xplusone = x1 + relay.const(1.0, "float32")
sb.ret(xplusone)
body = sb.get()
add_one = relay.GlobalVar("add_one")
add_one_func = relay.Function([x], body)
# add_two = compose(add_one, add_one)
sb = relay.ScopeBuilder()
y = relay.var("y", "float32")
add_two_func = sb.let("add_two", compose(add_one_func, add_one_func))
add_two_res = add_two_func(y)
sb.ret(add_two_res)
add_two_body = sb.get()
mod[add_one] = add_one_func
f = relay.Function([y], add_two_body)
mod["main"] = f
x_data = np.array(np.random.rand()).astype("float32")
result = get_serialized_output(mod, x_data)
tvm.testing.assert_allclose(result.numpy(), x_data + 2.0)
def test_closure():
x = relay.var("x", shape=())
y = relay.var("y", shape=())
f = relay.Function([x], x + y)
ff = relay.Function([y], f)
clo = ff(relay.const(1.0))
main = clo(relay.const(2.0))
res = get_serialized_output(main)
tvm.testing.assert_allclose(res.numpy(), 3.0)
def test_synthetic():
mod, params = testing.synthetic.get_workload()
run_network(mod, params)
def test_mobilenet():
mod, params = testing.mobilenet.get_workload(batch_size=1)
run_network(mod, params)
def test_vm_shape_of():
x = relay.var("x", shape=(relay.Any(), relay.Any(), relay.Any()), dtype="float32")
relu_x = relay.nn.relu(x)
data = np.random.uniform(size=(2, 3, 4)).astype("float32")
args = [data]
newshape_var = relay.var("newshape", shape=(2,), dtype="int64")
args.append(np.array((1, -1), dtype="int64"))
main = relay.Function([x, newshape_var], relay.reshape(relu_x, newshape=newshape_var))
res = get_serialized_output(main, *args).numpy()
tvm.testing.assert_allclose(res.flatten(), data.flatten())
def test_dynamic_bcast():
dtype = "float32"
x = relay.var("x", shape=(relay.Any(), 2), dtype=dtype)
y = relay.var("y", shape=(3, 2), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], relay.add(x, y))
x_data = np.random.uniform(size=(1, 2)).astype(dtype)
y_data = np.random.uniform(size=(3, 2)).astype(dtype)
res_np = np.add(x_data, y_data)
for target, dev in testing.enabled_targets():
res = get_serialized_output(mod, *(x_data, y_data), target=target, device=dev)
tvm.testing.assert_allclose(res.numpy(), res_np)
if __name__ == "__main__":
pytest.main([__file__])
|
the-stack_0_10235 | import cv2
import pickle
import os.path
import numpy as np
from imutils import paths
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Flatten, Dense
#from helpers import resize_to_fit
LETTER_IMAGES_FOLDER = "extracted_letter_images"
MODEL_FILENAME = "captcha_model.hdf5"
MODEL_LABELS_FILENAME = "model_labels.dat"
# initialize the data and labels
data = []
labels = []
# loop over the input images
for image_file in paths.list_images(LETTER_IMAGES_FOLDER):
# Load the image and convert it to grayscale
image = cv2.imread(image_file)
#image = cv2.threshold(image, 195, 255, cv2.THRESH_BINARY)[1]
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = cv2.threshold(image, 195, 255, cv2.THRESH_BINARY)[1]
#cv2.imshow('ImageWindow', image)
#cv2.waitKey()
# Add a third channel dimension to the image to make Keras happy
image = list(np.expand_dims(image, axis=2))
print(np.array(image).shape)
# Grab the name of the letter based on the folder it was in
label = image_file.split(os.path.sep)[-1][0]
# Add the letter image and it's label to our training data
data.append(image)
labels.append(label)
#print('data', data)
#print('labels', labels)
# scale the raw pixel intensities to the range [0, 1] (this improves training)
data = np.array(data, dtype="float") / 255
labels = np.array(labels)
# Split the training data into separate train and test sets
(X_train, X_test, Y_train, Y_test) = train_test_split(data, labels, test_size=0.25, random_state=0)
# Convert the labels (letters) into one-hot encodings that Keras can work with
lb = LabelBinarizer().fit(Y_train)
Y_train = lb.transform(Y_train)
Y_test = lb.transform(Y_test)
# Save the mapping from labels to one-hot encodings.
# We'll need this later when we use the model to decode what it's predictions mean
with open(MODEL_LABELS_FILENAME, "wb") as f:
pickle.dump(lb, f)
# Build the neural network!
model = Sequential()
# First convolutional layer with max pooling
model.add(Conv2D(20, (5, 5), padding="same", input_shape=(60, 40, 1), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Second convolutional layer with max pooling
model.add(Conv2D(50, (5, 5), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Hidden layer with 500 nodes
model.add(Flatten())
model.add(Dense(500, activation="relu"))
# Output layer with 32 nodes (one for each possible letter/number we predict)
model.add(Dense(28, activation="softmax"))
# Ask Keras to build the TensorFlow model behind the scenes
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
#print(X_train)
print(np.array(X_train).shape)
print(np.array(Y_train).shape)
# Train the neural network
model.fit(np.array(X_train), np.array(Y_train), validation_data=(X_test, Y_test), batch_size=3, epochs=10, verbose=1)
# Save the trained model to disk
model.save(MODEL_FILENAME)
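# Rough inference sketch (not exercised during training): reload the saved model and
# label binarizer and decode a single pre-cropped letter image. The image path passed
# in is a placeholder and is assumed to be the same 60x40 letter size used above.
from keras.models import load_model

def predict_letter(letter_image_path):
    # Reload the label binarizer and the trained network saved above.
    with open(MODEL_LABELS_FILENAME, "rb") as f:
        label_binarizer = pickle.load(f)
    trained_model = load_model(MODEL_FILENAME)
    # Mirror the preprocessing applied to the training images.
    img = cv2.imread(letter_image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.threshold(img, 195, 255, cv2.THRESH_BINARY)[1]
    img = np.expand_dims(img, axis=2).astype("float") / 255
    prediction = trained_model.predict(np.array([img]))
    return label_binarizer.inverse_transform(prediction)[0]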
|
the-stack_0_10237 | """
Setup script for libanac
"""
import sys
from setuptools import setup
import libanac
install_requires = [
'beautifulsoup4',
'requests',
]
if sys.version_info[:2] < (2, 7):
install_requires.append('argparse')
setup(
name=libanac.__title__,
description=libanac.__summary__,
long_description=open('README.rst').read(),
url=libanac.__url__,
author=libanac.__author__,
author_email=libanac.__email__,
license=libanac.__license__,
version=libanac.__version__,
packages=['libanac'],
test_suite='tests',
platforms='any',
keywords=['ANAC', 'SACI', 'CIV Digital'],
classifiers=[
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
],
install_requires=install_requires,
)
|
the-stack_0_10238 | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from pants.backend.helm.util_rules.chart_metadata import DEFAULT_API_VERSION, ChartType
def gen_chart_file(
name: str,
*,
version: str,
description: str | None = None,
type: ChartType = ChartType.APPLICATION,
api_version: str = DEFAULT_API_VERSION,
icon: str | None = None,
) -> str:
metadata_yaml = dedent(
f"""\
apiVersion: {api_version}
name: {name}
version: {version}
type: {type.value}
"""
)
if description:
metadata_yaml += f"description: {description}\n"
if icon:
metadata_yaml += f"icon: {icon}\n"
return metadata_yaml
HELM_CHART_FILE = gen_chart_file("mychart", version="0.1.0")
HELM_CHART_WITH_DEPENDENCIES_FILE = dedent(
"""\
apiVersion: v2
name: mychart
description: A Helm chart for Kubernetes
version: 0.1.0
icon: https://www.example.com/icon.png
dependencies:
- name: other_chart
repository: "@myrepo"
version: "~0.1.0"
alias: dependency_alias
"""
)
HELM_CHART_FILE_V1_FULL = dedent(
"""\
name: foo
version: 0.1.0
kubeVersion: 1.17
description: The foo chart
keywords:
- foo
- chart
home: https://example.com
sources:
- https://example.com/git
dependencies:
- name: bar
version: 0.2.0
repository: https://example.com/repo
condition: bar.enabled
tags:
- foo
- bar
import-values:
- data
alias: bar-alias
maintainers:
- name: foo
email: [email protected]
url: https://example.com/foo
icon: https://example.com/icon.png
appVersion: 0.1.0
deprecated: true
annotations:
example: yes
name: foo
"""
)
HELM_CHART_FILE_V2_FULL = dedent(
"""\
apiVersion: v2
name: quxx
version: 0.1.0
kubeVersion: 1.17
description: The foo chart
type: library
keywords:
- foo
- chart
home: https://example.com
sources:
- https://example.com/git
dependencies:
- name: bar
version: 0.2.0
repository: https://example.com/repo
condition: bar.enabled
tags:
- foo
- bar
import-values:
- data
alias: bar-alias
maintainers:
- name: foo
email: [email protected]
url: https://example.com/foo
icon: https://example.com/icon.png
appVersion: 0.1.0
deprecated: true
annotations:
example: yes
name: quxx
"""
)
K8S_SERVICE_FILE = dedent(
"""\
apiVersion: v1
kind: Service
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
protocol: TCP
name: {{ .Values.service.name }}
selector:
app: {{ template "fullname" . }}
"""
)
K8S_INGRESS_FILE_WITH_LINT_WARNINGS = dedent(
"""\
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
rules:
- host: example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ template "fullname" . }}
port:
name: http
"""
)
K8S_POD_FILE = dedent(
"""\
apiVersion: v1
kind: Pod
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
containers:
- name: myapp-container
image: busybox:1.28
initContainers:
- name: init-service
image: busybox:1.29
"""
)
K8S_CRD_FILE = dedent(
"""\
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
# name must match the spec fields below, and be in the form: <plural>.<group>
name: myplatforms.contoso.com
spec:
# group name to use for REST API: /apis/<group>/<version>
group: contoso.com
names:
# plural name to be used in the URL: /apis/<group>/<version>/<plural>
plural: myplatforms
# singular name to be used as an alias on the CLI and for display
singular: myplatform
# kind is normally the CamelCased singular type. Your resource manifests use this.
kind: MyPlatform
# shortNames allow shorter string to match your resource on the CLI
shortNames:
- myp
# either Namespaced or Cluster
scope: Namespaced
versions:
- name: v1alpha1
# Each version can be enabled/disabled by Served flag.
served: true
# One and only one version must be marked as the storage version.
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
appId:
type: string
language:
type: string
enum:
- csharp
- python
- go
os:
type: string
enum:
- windows
- linux
instanceSize:
type: string
enum:
- small
- medium
- large
environmentType:
type: string
enum:
- dev
- test
- prod
replicas:
type: integer
minimum: 1
required: ["appId", "language", "environmentType"]
required: ["spec"]
"""
)
HELM_TEMPLATE_HELPERS_FILE = dedent(
"""\
{{- define "fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
"""
)
HELM_VALUES_FILE = dedent(
"""\
service:
name: test
type: ClusterIP
externalPort: 80
internalPort: 1223
"""
)
|
the-stack_0_10239 | import numpy as np
from .triangle_hash import TriangleHash as _TriangleHash
def check_mesh_contains(mesh, points, hash_resolution=512):
intersector = MeshIntersector(mesh, hash_resolution)
contains = intersector.query(points)
return contains
class MeshIntersector:
def __init__(self, mesh, resolution=512):
triangles = mesh.vertices[mesh.faces].astype(np.float64)
n_tri = triangles.shape[0]
self.resolution = resolution
self.bbox_min = triangles.reshape(3 * n_tri, 3).min(axis=0)
self.bbox_max = triangles.reshape(3 * n_tri, 3).max(axis=0)
# Tranlate and scale it to [0.5, self.resolution - 0.5]^3
self.scale = (resolution - 1) / (self.bbox_max - self.bbox_min)
self.translate = 0.5 - self.scale * self.bbox_min
self._triangles = triangles = self.rescale(triangles)
# assert(np.allclose(triangles.reshape(-1, 3).min(0), 0.5))
# assert(np.allclose(triangles.reshape(-1, 3).max(0), resolution - 0.5))
triangles2d = triangles[:, :, :2]
self._tri_intersector2d = TriangleIntersector2d(
triangles2d, resolution)
def query(self, points):
# Rescale points
points = self.rescale(points)
# placeholder result with no hits we'll fill in later
        contains = np.zeros(len(points), dtype=bool)
# cull points outside of the axis aligned bounding box
# this avoids running ray tests unless points are close
inside_aabb = np.all((0 <= points) & (points <= self.resolution), axis=1)
if not inside_aabb.any():
return contains
# Only consider points inside bounding box
mask = inside_aabb
points = points[mask]
# Compute intersection depth and check order
points_indices, tri_indices = self._tri_intersector2d.query(points[:, :2])
triangles_intersect = self._triangles[tri_indices]
points_intersect = points[points_indices]
depth_intersect, abs_n_2 = self.compute_intersection_depth(points_intersect, triangles_intersect)
# Count number of intersections in both directions
smaller_depth = depth_intersect >= points_intersect[:, 2] * abs_n_2
bigger_depth = depth_intersect < points_intersect[:, 2] * abs_n_2
points_indices_0 = points_indices[smaller_depth]
points_indices_1 = points_indices[bigger_depth]
nintersect0 = np.bincount(points_indices_0, minlength=points.shape[0])
nintersect1 = np.bincount(points_indices_1, minlength=points.shape[0])
# Check if point contained in mesh
contains1 = (np.mod(nintersect0, 2) == 1)
contains2 = (np.mod(nintersect1, 2) == 1)
if (contains1 != contains2).any():
print('Warning: contains1 != contains2 for some points.')
contains[mask] = (contains1 & contains2)
return contains
def compute_intersection_depth(self, points, triangles):
t1 = triangles[:, 0, :]
t2 = triangles[:, 1, :]
t3 = triangles[:, 2, :]
v1 = t3 - t1
v2 = t2 - t1
# v1 = v1 / np.linalg.norm(v1, axis=-1, keepdims=True)
# v2 = v2 / np.linalg.norm(v2, axis=-1, keepdims=True)
normals = np.cross(v1, v2)
alpha = np.sum(normals[:, :2] * (t1[:, :2] - points[:, :2]), axis=1)
n_2 = normals[:, 2]
t1_2 = t1[:, 2]
s_n_2 = np.sign(n_2)
abs_n_2 = np.abs(n_2)
mask = (abs_n_2 != 0)
depth_intersect = np.full(points.shape[0], np.nan)
depth_intersect[mask] = \
t1_2[mask] * abs_n_2[mask] + alpha[mask] * s_n_2[mask]
# Test the depth:
# TODO: remove and put into tests
# points_new = np.concatenate([points[:, :2], depth_intersect[:, None]], axis=1)
# alpha = (normals * t1).sum(-1)
# mask = (depth_intersect == depth_intersect)
# assert(np.allclose((points_new[mask] * normals[mask]).sum(-1),
# alpha[mask]))
return depth_intersect, abs_n_2
def rescale(self, array):
array = self.scale * array + self.translate
return array
class TriangleIntersector2d:
def __init__(self, triangles, resolution=128):
self.triangles = triangles
self.tri_hash = _TriangleHash(triangles, resolution)
def query(self, points):
point_indices, tri_indices = self.tri_hash.query(points)
point_indices = np.array(point_indices, dtype=np.int64)
tri_indices = np.array(tri_indices, dtype=np.int64)
points = points[point_indices]
triangles = self.triangles[tri_indices]
mask = self.check_triangles(points, triangles)
point_indices = point_indices[mask]
tri_indices = tri_indices[mask]
return point_indices, tri_indices
def check_triangles(self, points, triangles):
        contains = np.zeros(points.shape[0], dtype=bool)
A = triangles[:, :2] - triangles[:, 2:]
A = A.transpose([0, 2, 1])
y = points - triangles[:, 2]
detA = A[:, 0, 0] * A[:, 1, 1] - A[:, 0, 1] * A[:, 1, 0]
mask = (np.abs(detA) != 0.)
A = A[mask]
y = y[mask]
detA = detA[mask]
s_detA = np.sign(detA)
abs_detA = np.abs(detA)
u = (A[:, 1, 1] * y[:, 0] - A[:, 0, 1] * y[:, 1]) * s_detA
v = (-A[:, 1, 0] * y[:, 0] + A[:, 0, 0] * y[:, 1]) * s_detA
sum_uv = u + v
contains[mask] = (
(0 < u) & (u < abs_detA) & (0 < v) & (v < abs_detA)
& (0 < sum_uv) & (sum_uv < abs_detA)
)
return contains
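# Minimal usage sketch (assumes the compiled triangle_hash extension is importable and
# that `mesh` exposes trimesh-style `vertices`/`faces` arrays); a unit tetrahedron is
# built by hand here purely to illustrate the call.
if __name__ == '__main__':
    class _ToyMesh:
        vertices = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
        faces = np.array([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])

    query_points = np.array([[0.1, 0.1, 0.1],   # inside the tetrahedron
                             [2.0, 2.0, 2.0]])  # clearly outside
    print(check_mesh_contains(_ToyMesh(), query_points))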
|
the-stack_0_10240 | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.beam_job_domain."""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
from core.domain import beam_job_domain
from core.platform import models
from core.tests import test_utils
from jobs.batch_jobs import validation_jobs
import utils
(beam_job_models,) = models.Registry.import_models([models.NAMES.beam_job])
class BeamJobTests(test_utils.TestBase):
NOW = datetime.datetime.utcnow()
def test_usage(self):
job = beam_job_domain.BeamJob(validation_jobs.AuditAllStorageModelsJob)
self.assertEqual(job.name, 'AuditAllStorageModelsJob')
def test_in_terminal_state(self):
cancelled_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.CANCELLED.value,
self.NOW, self.NOW, True)
drained_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.DRAINED.value,
self.NOW, self.NOW, True)
updated_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.UPDATED.value,
self.NOW, self.NOW, True)
done_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.DONE.value,
self.NOW, self.NOW, True)
failed_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.FAILED.value,
self.NOW, self.NOW, True)
cancelling_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.CANCELLING.value,
self.NOW, self.NOW, True)
draining_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.DRAINING.value,
self.NOW, self.NOW, True)
pending_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.PENDING.value,
self.NOW, self.NOW, True)
running_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.RUNNING.value,
self.NOW, self.NOW, True)
stopped_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.STOPPED.value,
self.NOW, self.NOW, True)
unknown_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.UNKNOWN.value,
self.NOW, self.NOW, True)
self.assertTrue(cancelled_beam_job_run.in_terminal_state)
self.assertTrue(drained_beam_job_run.in_terminal_state)
self.assertTrue(updated_beam_job_run.in_terminal_state)
self.assertTrue(done_beam_job_run.in_terminal_state)
self.assertTrue(failed_beam_job_run.in_terminal_state)
self.assertFalse(cancelling_beam_job_run.in_terminal_state)
self.assertFalse(draining_beam_job_run.in_terminal_state)
self.assertFalse(pending_beam_job_run.in_terminal_state)
self.assertFalse(running_beam_job_run.in_terminal_state)
self.assertFalse(stopped_beam_job_run.in_terminal_state)
self.assertFalse(unknown_beam_job_run.in_terminal_state)
def test_to_dict(self):
job = beam_job_domain.BeamJob(validation_jobs.AuditAllStorageModelsJob)
self.assertEqual(job.to_dict(), {'name': 'AuditAllStorageModelsJob'})
class BeamJobRunTests(test_utils.TestBase):
NOW = datetime.datetime.utcnow()
def test_usage(self):
run = beam_job_domain.BeamJobRun(
'123', 'FooJob', 'RUNNING', self.NOW, self.NOW, True)
self.assertEqual(run.job_id, '123')
self.assertEqual(run.job_name, 'FooJob')
self.assertEqual(run.job_state, 'RUNNING')
self.assertEqual(run.job_started_on, self.NOW)
self.assertEqual(run.job_updated_on, self.NOW)
self.assertTrue(run.job_is_synchronous)
def test_to_dict(self):
run = beam_job_domain.BeamJobRun(
'123', 'FooJob', 'RUNNING', self.NOW, self.NOW, True)
self.assertEqual(run.to_dict(), {
'job_id': '123',
'job_name': 'FooJob',
'job_state': 'RUNNING',
'job_started_on_msecs': utils.get_time_in_millisecs(self.NOW),
'job_updated_on_msecs': utils.get_time_in_millisecs(self.NOW),
'job_is_synchronous': True,
})
class AggregateBeamJobRunResultTests(test_utils.TestBase):
def test_usage(self):
result = beam_job_domain.AggregateBeamJobRunResult('abc', '123')
self.assertEqual(result.stdout, 'abc')
self.assertEqual(result.stderr, '123')
def test_to_dict(self):
result = beam_job_domain.AggregateBeamJobRunResult('abc', '123')
self.assertEqual(result.to_dict(), {
'stdout': 'abc',
'stderr': '123',
})
|
the-stack_0_10241 | #File with 2 function to take an screenshot in linux
import gi
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk
from Xlib import display, X
from PIL import Image
#PIL
#Try to integrate with PIL to return an PIL Image
def screenShot1():
# full screenshot
window = Gdk.get_default_root_window()
pb = Gdk.pixbuf_get_from_window(window, *window.get_geometry())
pb.savev("full.png", "png", (), ())
# # screenshots for all windows
# window = Gdk.get_default_root_window()
# screen = window.get_screen()
# typ = window.get_type_hint()
# for i, w in enumerate(screen.get_window_stack()):
# pb = Gdk.pixbuf_get_from_window(w, *w.get_geometry())
# pb.savev("{}.png".format(i), "png", (), ())
# # screenshot active window
# screen = Gdk.get_default_root_window().get_screen()
# w = screen.get_active_window()
# pb = Gdk.pixbuf_get_from_window(w, *w.get_geometry())
# pb.savev("active.png", "png", (), ())
#Works with PIL, but too slow
def screenShot2():
dsp = display.Display()
root = dsp.screen().root
w = root.get_geometry().width
h = root.get_geometry().height
print(dsp.get_display_name(), w, h)
raw = root.get_image(0, 0, w, h, X.ZPixmap, 0xffffffff)
image = Image.frombytes("RGB", (w, h), raw.data, "raw", "BGRX")
# image.show()
# image.save("teste.png")
return image
def performanceTest():
import time
counter=10
while counter:
print(time.perf_counter(), counter)
screenShot2()
counter -=1
# screenShot2()
performanceTest()
|
the-stack_0_10246 | from bs4 import BeautifulSoup
import csv
from urllib.request import urlopen
from os.path import exists, join
from os import mkdir
from itertools import groupby
from operator import itemgetter
def read_page(url):
return urlopen(url).read()
def clean_comment(name_with_parenthesis):
return name_with_parenthesis.split("(")[0].strip()
def find_all_streets(html):
    soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all("h2")
assert titles[0].text.startswith("Liste"), titles[0].text
assert titles[1].text.startswith("Voir aussi") or \
titles[1].text.startswith("Source") or \
titles[1].text.startswith("Par type"), titles[1].text
all_li = titles[1].find_all_previous("li")
labels = [clean_comment(li.text) for li in all_li if clean_comment(li.text) != ""]
return labels
# From https://docs.python.org/3/library/itertools.html#itertools-recipes
def unique_justseen(iterable, key=None):
"List unique elements, preserving order. Remember only the element just seen."
# unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
# unique_justseen('ABBCcAD', str.lower) --> A B C A D
return map(next, map(itemgetter(1), groupby(iterable, key)))
def save_csv(records):
SAVE_DIR = 'data'
SAVE_FILE = join(SAVE_DIR, 'paris-streets.csv')
if not exists(SAVE_DIR):
mkdir(SAVE_DIR);
HEADER = ['street','arrondissement','from_url']
writer = csv.writer(open(SAVE_FILE, 'w'), lineterminator='\n')
writer.writerow(HEADER)
writer.writerows(records)
URLS = [
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_1er_arrondissement_de_Paris", 1),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_2e_arrondissement_de_Paris", 2),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_3e_arrondissement_de_Paris", 3),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_4e_arrondissement_de_Paris", 4),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_5e_arrondissement_de_Paris", 5),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_6e_arrondissement_de_Paris", 6),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_7e_arrondissement_de_Paris", 7),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_8e_arrondissement_de_Paris", 8),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_9e_arrondissement_de_Paris", 9),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_10e_arrondissement_de_Paris", 10),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_11e_arrondissement_de_Paris", 11),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_12e_arrondissement_de_Paris", 12),
# ("https://fr.wikipedia.org/wiki/Liste_des_voies_du_bois_de_Vincennes", 12),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_13e_arrondissement_de_Paris", 13),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_14e_arrondissement_de_Paris", 14),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_15e_arrondissement_de_Paris", 15),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_16e_arrondissement_de_Paris", 16),
# ("https://fr.wikipedia.org/wiki/Liste_des_voies_du_bois_de_Boulogne", 16),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_17e_arrondissement_de_Paris", 17),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_18e_arrondissement_de_Paris", 18),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_19e_arrondissement_de_Paris", 19),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_20e_arrondissement_de_Paris", 20),
]
records = []
for (url, num_arrondissement) in URLS:
print("Scraping {}\n".format(url))
html = read_page(url)
arrondissement_records = [(street, num_arrondissement, url) for street in find_all_streets(html)]
# Sorting ensure easy tracking of modifications in git
arrondissement_records.sort(key=lambda s: s[0].lower())
records += unique_justseen(arrondissement_records)
save_csv(records)
|
the-stack_0_10247 | #!/usr/bin/env python
import rospy
import json
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image, CompressedImage
import cv2
import numpy as np
from wr8_ai.yolo import fps
import wr8_ai.detector_ncs as det
import time
class ImageReceiverROS:
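    """Caches the latest raw and compressed frames received from the camera topics."""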
def __init__(self):
self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("camera", Image, self.callback_img, queue_size=1)
        self.image_sub_compr = rospy.Subscriber("camera_compr", CompressedImage, self.callback_img_compressed, queue_size=1)
self.cv_image = None
self.cv_image_comp = None
def callback_img(self, data):
try:
self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
rospy.logwarn(e)
def callback_img_compressed(self, data):
        np_arr = np.frombuffer(data.data, np.uint8)
self.cv_image_comp = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
def get_image(self):
return self.cv_image
def get_image_compressed(self):
return self.cv_image_comp
class ImagePublisherROS:
def __init__(self):
self.bridge = CvBridge()
        self.image_pub = rospy.Publisher("netout/compressed", CompressedImage, queue_size=1)
def publish(self, cv_image):
msg = CompressedImage()
msg.header.stamp = rospy.Time.now()
msg.format = "png"
        msg.data = np.array(cv2.imencode('.png', cv_image)[1]).tobytes()
self.image_pub.publish(msg)
def main():
rospy.init_node('test_signs')
graph_path = rospy.get_param('~graph_path')
config_path = rospy.get_param('~config_path')
fps_msr = rospy.get_param('~fps_msr', True)
fps_meter = fps.FPSMeter()
rospy.loginfo('Start processing')
detector = det.DetectorNCS()
if not detector.init(0, graph_path, config_path):
        rospy.logerr('Failed to initialize detector')
        return
img_rcvr = ImageReceiverROS()
img_pub = ImagePublisherROS()
skip_cntr = 0
while not rospy.is_shutdown():
image = img_rcvr.get_image_compressed()
if image is None:
rospy.sleep(0.01) # 10 ms
skip_cntr += 1
if skip_cntr > 300:
rospy.logwarn('No image for 3 seconds...')
skip_cntr = 0
continue
render_img = image.copy()
start = time.time()
boxes, box_img = detector.get_signs(cv_img=image, render_img=render_img)
if fps_msr:
fps_meter.update(time.time() - start)
if fps_meter.milliseconds > 5000:
fps_meter.print_statistics()
fps_meter.reset()
img_pub.publish(box_img)
# cv2.imshow('2', image)
# key = cv2.waitKey(10)
# if key == 27:
# break
# cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
the-stack_0_10248 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN RUNNING
#
# ./breeze prepare-provider-readme
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `SETUP_TEMPLATE.py.jinja2` IN the `provider_packages` DIRECTORY
"""Setup.py for the apache-airflow-backport-providers-jira package."""
import logging
import os
import sys
from os.path import dirname
from setuptools import find_packages, setup
logger = logging.getLogger(__name__)
version = '2020.10.29'
my_dir = dirname(__file__)
try:
with open(
os.path.join(my_dir, 'airflow/providers/jira/BACKPORT_PROVIDER_README.md'), encoding='utf-8'
) as f:
long_description = f.read()
except FileNotFoundError:
long_description = ''
def do_setup(version_suffix_for_pypi=''):
"""Perform the package apache-airflow-backport-providers-jira setup."""
setup(
name='apache-airflow-backport-providers-jira',
description='Backport provider package apache-airflow-backport-providers-jira for Apache Airflow',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License 2.0',
version=version + version_suffix_for_pypi,
packages=find_packages(include=['airflow.providers.jira*']),
zip_safe=False,
install_requires=['apache-airflow~=1.10', 'JIRA>1.0.7'],
setup_requires=['setuptools', 'wheel'],
extras_require={},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.apache.org/',
download_url=('https://archive.apache.org/dist/airflow/backport-providers'),
python_requires='~=3.6',
project_urls={
'Documentation': 'https://airflow.apache.org/docs/',
'Bug Tracker': 'https://github.com/apache/airflow/issues',
'Source Code': 'https://github.com/apache/airflow',
},
)
#
# Note that --version-suffix-for-pypi should only be used in case we generate RC packages for PyPI
# Those packages should have actual RC version in order to be published even if source version
# should be the final one.
#
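# Illustrative invocation (sketch; the suffix value is hypothetical):
#   python setup.py --version-suffix-for-pypi rc1 sdist bdist_wheel
# which builds version '2020.10.29rc1' while leaving the source version unchanged.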
if __name__ == "__main__":
suffix = ''
if len(sys.argv) > 1 and sys.argv[1] == "--version-suffix-for-pypi":
if len(sys.argv) < 3:
print("ERROR! --version-suffix-for-pypi needs parameter!", file=sys.stderr)
sys.exit(1)
suffix = sys.argv[2]
sys.argv = [sys.argv[0]] + sys.argv[3:]
do_setup(version_suffix_for_pypi=suffix)
|
the-stack_0_10251 | """setup.py file."""
import uuid
from setuptools import setup, find_packages
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
__author__ = 'Hao Tang <[email protected]>'
install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())
try:
reqs = [str(ir.req) for ir in install_reqs]
except AttributeError:  # pip >= 20 exposes .requirement instead of .req
reqs = [str(ir.requirement) for ir in install_reqs]
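# e.g. a requirements.txt line such as "netmiko>=2.0.0" (illustrative; the real pins live
# in requirements.txt) ends up as that same string in reqs and feeds install_requires below.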
setup(
name="napalm-ce",
version="0.1.1",
packages=find_packages(),
author="Hao Tang",
author_email="[email protected]",
description="Network Automation and Programmability Abstraction Layer with Multivendor support",
classifiers=[
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
],
url="https://github.com/napalm-automation-community/napalm-ce",
include_package_data=True,
install_requires=reqs,
)
|
the-stack_0_10253 | from copy import copy, deepcopy
import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p17
from pandas.core.dtypes.common import is_scalar
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
import pandas._testing as tm
import pandas.core.common as com
# ----------------------------------------------------------------------
# Generic types test cases
class Generic:
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
"""
construct an object for the given shape
        if value is specified use that if it's a scalar
if value is an array, repeat it as needed
"""
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if is_scalar(value):
if value == "empty":
arr = None
dtype = np.float64
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
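    # Illustrative calls (sketch): in the Series-based subclass, self._construct(4, value=1)
    # builds a length-4 Series of ones; in the DataFrame-based subclass the same call
    # broadcasts the scalar across a 4x4 frame (one axis per self._ndim).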
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
idx = list("ABCD")
# relabeling values passed into self.rename
args = [
str.lower,
{x: x.lower() for x in idx},
Series({x: x.lower() for x in idx}),
]
for axis in self._axes():
kwargs = {axis: idx}
obj = self._construct(4, **kwargs)
for arg in args:
# rename a single axis
result = obj.rename(**{axis: arg})
expected = obj.copy()
setattr(expected, axis, list("abcd"))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {self._typ._AXIS_NAMES[i]: list(range(n)) for i in range(self._ndim)}
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value="empty", **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so we can't test for
        # non-inclusion
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
msg = f"The truth value of a {self._typ.__name__} is ambiguous"
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
obj = self._construct(shape=4, value=1)
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
obj = self._construct(shape=4, value=np.nan)
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
# empty
obj = self._construct(shape=0)
with pytest.raises(ValueError, match=msg):
bool(obj)
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
with pytest.raises(ValueError, match=msg):
if obj1:
pass
with pytest.raises(ValueError, match=msg):
obj1 and obj2
with pytest.raises(ValueError, match=msg):
obj1 or obj2
with pytest.raises(ValueError, match=msg):
not obj1
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast()
self._compare(result, o)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast()
self._compare(result, o)
def test_constructor_compound_dtypes(self):
# see gh-5191
# Compound dtypes should raise NotImplementedError.
def f(dtype):
return self._construct(shape=3, value=1, dtype=dtype)
msg = (
"compound dtypes are not implemented "
f"in the {self._typ.__name__} constructor"
)
with pytest.raises(NotImplementedError, match=msg):
f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])
# these work (though results may be unexpected)
f("int64")
f("float64")
f("M8[ns]")
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
assert v is None
else:
assert v == getattr(y, m, None)
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = "foo"
o2 = self._construct(shape=3)
o2.name = "bar"
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
result = getattr(o, op)(o)
self.check_metadata(o, result)
# simple boolean
for op in ["__eq__", "__le__", "__ge__"]:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
self.check_metadata(o, v1 & v1)
self.check_metadata(o, v1 | v1)
# combine_first
result = o.combine_first(o2)
self.check_metadata(o, result)
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
result = o + o2
self.check_metadata(result)
# simple boolean
for op in ["__eq__", "__le__", "__ge__"]:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
self.check_metadata(v1 & v2)
self.check_metadata(v1 | v2)
def test_head_tail(self, indices):
# GH5370
o = self._construct(shape=len(indices))
axis = o._get_axis_name(0)
setattr(o, axis, indices)
o.head()
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(len(indices) - 3))
self._compare(o.tail(-3), o.tail(len(indices) - 3))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4, random_state=seed)
)
self._compare(
o.sample(frac=0.7, random_state=seed),
o.sample(frac=0.7, random_state=seed),
)
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)),
)
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
)
self._compare(
o.sample(
frac=2, replace=True, random_state=np.random.RandomState(test)
),
o.sample(
frac=2, replace=True, random_state=np.random.RandomState(test)
),
)
os1, os2 = [], []
for _ in range(2):
np.random.seed(test)
os1.append(o.sample(n=4))
os2.append(o.sample(frac=0.7))
self._compare(*os1)
self._compare(*os2)
# Check for error when random_state argument invalid.
with pytest.raises(ValueError):
o.sample(random_state="astring!")
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with pytest.raises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with pytest.raises(ValueError):
o.sample(n=-3)
with pytest.raises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with pytest.raises(ValueError):
o.sample(n=3.2)
# Check lengths are right
        assert len(o.sample(n=4)) == 4
        assert len(o.sample(frac=0.34)) == 3
        assert len(o.sample(frac=0.36)) == 4
###
# Check weights
###
# Weight length must be right
with pytest.raises(ValueError):
o.sample(n=3, weights=[0, 1])
with pytest.raises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with pytest.raises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with pytest.raises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with pytest.raises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with pytest.raises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with pytest.raises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with pytest.raises(ValueError):
o.sample(n=3, weights=nan_weights)
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
def test_sample_upsampling_without_replacement(self):
# GH27451
df = pd.DataFrame({"A": list("abc")})
msg = (
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
with pytest.raises(ValueError, match=msg):
df.sample(frac=2, replace=False)
def test_sample_is_copy(self):
# GH-27357, GH-30784: ensure the result of sample is an actual copy and
# doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
df = pd.DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
df2 = df.sample(3)
with tm.assert_produces_warning(None):
df2["d"] = 1
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
assert o.size == np.prod(o.shape)
assert o.size == 10 ** len(o.axes)
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
assert len(np.array_split(o, 5)) == 5
assert len(np.array_split(o, 2)) == 2
# See gh-12301
def test_stat_unexpected_keyword(self):
obj = self._construct(5)
starwars = "Star Wars"
errmsg = "unexpected keyword"
with pytest.raises(TypeError, match=errmsg):
obj.max(epic=starwars) # stat_function
with pytest.raises(TypeError, match=errmsg):
obj.var(epic=starwars) # stat_function_ddof
with pytest.raises(TypeError, match=errmsg):
obj.sum(epic=starwars) # cum_function
with pytest.raises(TypeError, match=errmsg):
obj.any(epic=starwars) # logical_function
@pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"])
def test_api_compat(self, func):
# GH 12021
# compat for __name__, __qualname__
obj = self._construct(5)
f = getattr(obj, func)
assert f.__name__ == func
assert f.__qualname__.endswith(func)
def test_stat_non_defaults_args(self):
obj = self._construct(5)
out = np.array([0])
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
obj.max(out=out) # stat_function
with pytest.raises(ValueError, match=errmsg):
obj.var(out=out) # stat_function_ddof
with pytest.raises(ValueError, match=errmsg):
obj.sum(out=out) # cum_function
with pytest.raises(ValueError, match=errmsg):
obj.any(out=out) # logical_function
def test_truncate_out_of_bounds(self):
# GH11382
# small
shape = [int(2e3)] + ([1] * (self._ndim - 1))
small = self._construct(shape, dtype="int8", value=1)
self._compare(small.truncate(), small)
self._compare(small.truncate(before=0, after=3e3), small)
self._compare(small.truncate(before=-1, after=2e3), small)
# big
shape = [int(2e6)] + ([1] * (self._ndim - 1))
big = self._construct(shape, dtype="int8", value=1)
self._compare(big.truncate(), big)
self._compare(big.truncate(before=0, after=3e6), big)
self._compare(big.truncate(before=-1, after=2e6), big)
@pytest.mark.parametrize(
"func",
[copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
)
@pytest.mark.parametrize("shape", [0, 1, 2])
def test_copy_and_deepcopy(self, shape, func):
# GH 15444
obj = self._construct(shape)
obj_copy = func(obj)
assert obj_copy is not obj
self._compare(obj_copy, obj)
@pytest.mark.parametrize(
"periods,fill_method,limit,exp",
[
(1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
(1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
(1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]),
(1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]),
(-1, "ffill", None, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, 0, np.nan]),
(-1, "ffill", 1, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, np.nan, np.nan]),
(-1, "bfill", None, [0, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
(-1, "bfill", 1, [np.nan, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
],
)
def test_pct_change(self, periods, fill_method, limit, exp):
vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
obj = self._typ(vals)
func = getattr(obj, "pct_change")
res = func(periods=periods, fill_method=fill_method, limit=limit)
if type(obj) is DataFrame:
tm.assert_frame_equal(res, DataFrame(exp))
else:
tm.assert_series_equal(res, Series(exp))
class TestNDFrame:
# tests that don't fit elsewhere
    def test_sample(self):
# Fixes issue: 2419
# additional specific object based tests
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame(
{
"col1": range(10, 20),
"col2": range(20, 30),
"colString": ["a"] * 10,
"easyweights": easy_weight_list,
}
)
sample1 = df.sample(n=1, weights="easyweights")
tm.assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series or
# DataFrame with axis = 1.
s = Series(range(10))
with pytest.raises(ValueError):
s.sample(n=3, weights="weight_column")
with pytest.raises(ValueError):
df.sample(n=1, weights="weight_column", axis=1)
# Check weighting key error
with pytest.raises(
KeyError, match="'String passed to weights not a valid column'"
):
df.sample(n=3, weights="not_a_real_column_name")
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({"col1": range(10), "col2": ["a"] * 10})
second_column_weight = [0, 1]
tm.assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[["col2"]]
)
# Different axis arg types
tm.assert_frame_equal(
df.sample(n=1, axis="columns", weights=second_column_weight), df[["col2"]]
)
weight = [0] * 10
weight[5] = 0.5
tm.assert_frame_equal(df.sample(n=1, axis="rows", weights=weight), df.iloc[5:6])
tm.assert_frame_equal(
df.sample(n=1, axis="index", weights=weight), df.iloc[5:6]
)
# Check out of range axis values
with pytest.raises(ValueError):
df.sample(n=1, axis=2)
with pytest.raises(ValueError):
df.sample(n=1, axis="not_a_name")
with pytest.raises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with pytest.raises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame(
{"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
)
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
tm.assert_frame_equal(sample1, df[["colString"]])
# Test default axes
tm.assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)
)
# Test that function aligns weights with frame
df = DataFrame({"col1": [5, 6, 7], "col2": ["a", "b", "c"]}, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filed with zeros
s3 = Series([0.01, 0], index=[3, 5])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with pytest.raises(ValueError):
df.sample(1, weights=s4)
@pytest.mark.parametrize(
"func_str,arg",
[
("np.array", [2, 3, 1, 0]),
pytest.param(
"np.random.MT19937",
3,
marks=pytest.mark.skipif(_np_version_under1p17, reason="NumPy<1.17"),
),
pytest.param(
"np.random.PCG64",
11,
marks=pytest.mark.skipif(_np_version_under1p17, reason="NumPy<1.17"),
),
],
)
def test_sample_random_state(self, func_str, arg):
# GH32503
df = pd.DataFrame({"col1": range(10, 20), "col2": range(20, 30)})
result = df.sample(n=3, random_state=eval(func_str)(arg))
expected = df.sample(n=3, random_state=com.random_state(eval(func_str)(arg)))
tm.assert_frame_equal(result, expected)
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=["A"])
tm.assert_series_equal(df.squeeze(), df["A"])
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = Series([], name="five", dtype=np.float64)
empty_frame = DataFrame([empty_series])
tm.assert_series_equal(empty_series, empty_series.squeeze())
tm.assert_series_equal(empty_series, empty_frame.squeeze())
# axis argument
df = tm.makeTimeDataFrame(nper=1).iloc[:, :1]
assert df.shape == (1, 1)
tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0])
assert df.squeeze() == df.iloc[0, 0]
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.squeeze(axis=2)
msg = "No axis named x for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.squeeze(axis="x")
df = tm.makeTimeDataFrame(3)
tm.assert_frame_equal(df.squeeze(axis=0), df)
def test_numpy_squeeze(self):
s = tm.makeFloatSeries()
tm.assert_series_equal(np.squeeze(s), s)
df = tm.makeTimeDataFrame().reindex(columns=["A"])
tm.assert_series_equal(np.squeeze(df), df["A"])
def test_transpose(self):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
# calls implementation in pandas/core/base.py
tm.assert_series_equal(s.transpose(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.transpose().transpose(), df)
def test_numpy_transpose(self):
msg = "the 'axes' parameter is not supported"
s = tm.makeFloatSeries()
tm.assert_series_equal(np.transpose(s), s)
with pytest.raises(ValueError, match=msg):
np.transpose(s, axes=1)
df = tm.makeTimeDataFrame()
tm.assert_frame_equal(np.transpose(np.transpose(df)), df)
with pytest.raises(ValueError, match=msg):
np.transpose(df, axes=1)
def test_take(self):
indices = [1, 5, -2, 6, 3, -1]
for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
out = s.take(indices)
expected = Series(
data=s.values.take(indices), index=s.index.take(indices), dtype=s.dtype
)
tm.assert_series_equal(out, expected)
for df in [tm.makeTimeDataFrame()]:
out = df.take(indices)
expected = DataFrame(
data=df.values.take(indices, axis=0),
index=df.index.take(indices),
columns=df.columns,
)
tm.assert_frame_equal(out, expected)
def test_take_invalid_kwargs(self):
indices = [-3, 2, 0, 1]
s = tm.makeFloatSeries()
df = tm.makeTimeDataFrame()
for obj in (s, df):
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
obj.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
obj.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
obj.take(indices, mode="clip")
@pytest.mark.parametrize("is_copy", [True, False])
def test_depr_take_kwarg_is_copy(self, is_copy):
# GH 27357
df = DataFrame({"A": [1, 2, 3]})
msg = (
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this."
)
with tm.assert_produces_warning(FutureWarning) as w:
df.take([0, 1], is_copy=is_copy)
assert w[0].message.args[0] == msg
s = Series([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
s.take([0, 1], is_copy=is_copy)
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = 99
assert not s1.equals(s2)
# NaNs compare as equal
s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])
s2 = s1.copy()
assert s1.equals(s2)
s2[0] = 9.9
assert not s1.equals(s2)
idx = MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c")])
s1 = Series([1, 2, np.nan], index=idx)
s2 = s1.copy()
assert s1.equals(s2)
# Add object dtype column with nans
index = np.random.random(10)
df1 = DataFrame(np.random.random(10), index=index, columns=["floats"])
df1["text"] = "the sky is so blue. we could use more chocolate.".split()
df1["start"] = date_range("2000-1-1", periods=10, freq="T")
df1["end"] = date_range("2000-1-1", periods=10, freq="D")
df1["diff"] = df1["end"] - df1["start"]
df1["bool"] = np.arange(10) % 3 == 0
df1.loc[::2] = np.nan
df2 = df1.copy()
assert df1["text"].equals(df2["text"])
assert df1["start"].equals(df2["start"])
assert df1["end"].equals(df2["end"])
assert df1["diff"].equals(df2["diff"])
assert df1["bool"].equals(df2["bool"])
assert df1.equals(df2)
assert not df1.equals(object)
# different dtype
different = df1.copy()
different["floats"] = different["floats"].astype("float32")
assert not df1.equals(different)
# different index
different_index = -index
different = df2.set_index(different_index)
assert not df1.equals(different)
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
assert not df1.equals(different)
# DatetimeIndex
index = pd.date_range("2000-1-1", periods=10, freq="T")
df1 = df1.set_index(index)
df2 = df1.copy()
assert df1.equals(df2)
# MultiIndex
df3 = df1.set_index(["text"], append=True)
df2 = df1.set_index(["text"], append=True)
assert df3.equals(df2)
df2 = df1.set_index(["floats"], append=True)
assert not df3.equals(df2)
# NaN in index
df3 = df1.set_index(["floats"], append=True)
df2 = df1.set_index(["floats"], append=True)
assert df3.equals(df2)
# GH 8437
a = pd.Series([False, np.nan])
b = pd.Series([False, np.nan])
c = pd.Series(index=range(2), dtype=object)
d = c.copy()
e = c.copy()
f = c.copy()
c[:-1] = d[:-1] = e[0] = f[0] = False
assert a.equals(a)
assert a.equals(b)
assert a.equals(c)
assert a.equals(d)
assert a.equals(e)
assert e.equals(f)
def test_pipe(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: x ** y
result = df.pipe(f, 2)
expected = DataFrame({"A": [1, 4, 9]})
tm.assert_frame_equal(result, expected)
result = df.A.pipe(f, 2)
tm.assert_series_equal(result, expected.A)
def test_pipe_tuple(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: y
result = df.pipe((f, "y"), 0)
tm.assert_frame_equal(result, df)
result = df.A.pipe((f, "y"), 0)
tm.assert_series_equal(result, df.A)
def test_pipe_tuple_error(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: y
with pytest.raises(ValueError):
df.pipe((f, "y"), x=1, y=0)
with pytest.raises(ValueError):
df.A.pipe((f, "y"), x=1, y=0)
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_axis_classmethods(self, box):
obj = box(dtype=object)
values = (
list(box._AXIS_NAMES.keys())
+ list(box._AXIS_NUMBERS.keys())
+ list(box._AXIS_ALIASES.keys())
)
for v in values:
assert obj._get_axis_number(v) == box._get_axis_number(v)
assert obj._get_axis_name(v) == box._get_axis_name(v)
assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)
|
the-stack_0_10255 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('image_collection', '0006_imageslide_data_class'),
]
operations = [
migrations.AddField(
model_name='imageslide',
name='is_visible_on_mobile',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='imageslide',
name='mobile_link',
field=models.CharField(help_text='i.e. "{route: "shop/cateogry", categoryName: "artworks"}"', max_length=4000, verbose_name='mobile link', blank=True),
),
]
|
the-stack_0_10257 | pkgname = "dejagnu"
pkgver = "1.6.3"
pkgrel = 0
build_style = "gnu_configure"
make_cmd = "gmake"
hostmakedepends = ["gmake", "expect-devel"]
makedepends = ["expect-devel"]
depends = ["expect"]
pkgdesc = "Framework for running test suites on GNU tools"
maintainer = "q66 <[email protected]>"
license = "GPL-3.0-or-later"
url = "http://www.gnu.org/software/dejagnu"
source = f"$(GNU_SITE)/{pkgname}/{pkgname}-{pkgver}.tar.gz"
sha256 = "87daefacd7958b4a69f88c6856dbd1634261963c414079d0c371f589cd66a2e3"
# like 4 tests fail and it's impossible to tell what is going on
options = ["!check"] |
the-stack_0_10258 | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Callbacks to use with the Trainer class and customize the training loop.
"""
import collections
import dataclasses
import json
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
import numpy as np
from tqdm.auto import tqdm
from .trainer_utils import IntervalStrategy
from .training_args import TrainingArguments
from .utils import logging
logger = logging.get_logger(__name__)
@dataclass
class TrainerState:
"""
    A class containing the [`Trainer`] inner state that will be saved along with the model and optimizer
    when checkpointing and passed to the [`TrainerCallback`].
<Tip>
In all this class, one step is to be understood as one update step. When using gradient accumulation, one
update step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`,
then one update step requires going through *n* batches.
</Tip>
Args:
epoch (`float`, *optional*):
Only set during training, will represent the epoch the training is at (the decimal part being the
percentage of the current epoch completed).
global_step (`int`, *optional*, defaults to 0):
During training, represents the number of update steps completed.
max_steps (`int`, *optional*, defaults to 0):
The number of update steps to do during the current training.
total_flos (`float`, *optional*, defaults to 0):
The total number of floating operations done by the model since the beginning of training (stored as floats
to avoid overflow).
log_history (`List[Dict[str, float]]`, *optional*):
The list of logs done since the beginning of training.
best_metric (`float`, *optional*):
When tracking the best model, the value of the best metric encountered so far.
best_model_checkpoint (`str`, *optional*):
When tracking the best model, the value of the name of the checkpoint for the best model encountered so
far.
is_local_process_zero (`bool`, *optional*, defaults to `True`):
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on
several machines) main process.
is_world_process_zero (`bool`, *optional*, defaults to `True`):
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be `True` for one process).
is_hyper_param_search (`bool`, *optional*, defaults to `False`):
Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. This will
impact the way data will be logged in TensorBoard.
"""
epoch: Optional[float] = None
global_step: int = 0
max_steps: int = 0
num_train_epochs: int = 0
total_flos: float = 0
log_history: List[Dict[str, float]] = None
best_metric: Optional[float] = None
best_model_checkpoint: Optional[str] = None
is_local_process_zero: bool = True
is_world_process_zero: bool = True
is_hyper_param_search: bool = False
trial_name: str = None
trial_params: Dict[str, Union[str, float, int, bool]] = None
def __post_init__(self):
if self.log_history is None:
self.log_history = []
def save_to_json(self, json_path: str):
"""Save the content of this instance in JSON format inside `json_path`."""
json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n"
with open(json_path, "w", encoding="utf-8") as f:
f.write(json_string)
@classmethod
def load_from_json(cls, json_path: str):
"""Create an instance from the content of `json_path`."""
with open(json_path, "r", encoding="utf-8") as f:
text = f.read()
return cls(**json.loads(text))
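# Illustrative round trip (sketch; the checkpoint path is hypothetical):
#   state = TrainerState(global_step=10, max_steps=100)
#   state.save_to_json("checkpoint-10/trainer_state.json")
#   state = TrainerState.load_from_json("checkpoint-10/trainer_state.json")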
@dataclass
class TrainerControl:
"""
A class that handles the [`Trainer`] control flow. This class is used by the
[`TrainerCallback`] to activate some switches in the training loop.
Args:
should_training_stop (`bool`, *optional*, defaults to `False`):
Whether or not the training should be interrupted.
If `True`, this variable will not be set back to `False`. The training will just stop.
should_epoch_stop (`bool`, *optional*, defaults to `False`):
Whether or not the current epoch should be interrupted.
If `True`, this variable will be set back to `False` at the beginning of the next epoch.
should_save (`bool`, *optional*, defaults to `False`):
Whether or not the model should be saved at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
should_evaluate (`bool`, *optional*, defaults to `False`):
Whether or not the model should be evaluated at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
should_log (`bool`, *optional*, defaults to `False`):
Whether or not the logs should be reported at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
"""
should_training_stop: bool = False
should_epoch_stop: bool = False
should_save: bool = False
should_evaluate: bool = False
should_log: bool = False
def _new_training(self):
"""Internal method that resets the variable for a new training."""
self.should_training_stop = False
def _new_epoch(self):
"""Internal method that resets the variable for a new epoch."""
self.should_epoch_stop = False
def _new_step(self):
"""Internal method that resets the variable for a new step."""
self.should_save = False
self.should_evaluate = False
self.should_log = False
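# A callback can flip these switches and return the modified control object to steer the
# training loop, e.g. (sketch) setting control.should_save = True inside on_step_end
# forces a checkpoint at that step.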
class TrainerCallback:
"""
A class for objects that will inspect the state of the training loop at some events and take some decisions. At
each of those events the following arguments are available:
Args:
args ([`TrainingArguments`]):
The training arguments used to instantiate the [`Trainer`].
state ([`TrainerState`]):
The current state of the [`Trainer`].
control ([`TrainerControl`]):
The object that is returned to the [`Trainer`] and can be used to make some decisions.
model ([`PreTrainedModel`] or `torch.nn.Module`):
The model being trained.
tokenizer ([`PreTrainedTokenizer`]):
The tokenizer used for encoding the data.
optimizer (`torch.optim.Optimizer`):
The optimizer used for the training steps.
lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`):
The scheduler used for setting the learning rate.
train_dataloader (`torch.utils.data.DataLoader`, *optional*):
The current dataloader used for training.
eval_dataloader (`torch.utils.data.DataLoader`, *optional*):
The current dataloader used for training.
metrics (`Dict[str, float]`):
The metrics computed by the last evaluation phase.
Those are only accessible in the event `on_evaluate`.
logs (`Dict[str, float]`):
The values to log.
Those are only accessible in the event `on_log`.
The `control` object is the only one that can be changed by the callback, in which case the event that changes
it should return the modified version.
The argument `args`, `state` and `control` are positionals for all events, all the others are
grouped in `kwargs`. You can unpack the ones you need in the signature of the event using them. As an example,
see the code of the simple [`~transformer.PrinterCallback`].
Example:
```python
class PrinterCallback(TrainerCallback):
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop("total_flos", None)
if state.is_local_process_zero:
print(logs)
```"""
def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of the initialization of the [`Trainer`].
"""
pass
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of training.
"""
pass
def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of training.
"""
pass
def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of an epoch.
"""
pass
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of an epoch.
"""
pass
def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of a training step. If using gradient accumulation, one training step might take
several inputs.
"""
pass
def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of an substep during gradient accumulation.
"""
pass
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of a training step. If using gradient accumulation, one training step might take
several inputs.
"""
pass
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after an evaluation phase.
"""
pass
def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after a checkpoint save.
"""
pass
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after logging the last logs.
"""
pass
def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after a prediction step.
"""
pass
class CallbackHandler(TrainerCallback):
"""Internal class that just calls the list of callbacks in order."""
def __init__(self, callbacks, model, tokenizer, optimizer, lr_scheduler):
self.callbacks = []
for cb in callbacks:
self.add_callback(cb)
self.model = model
self.tokenizer = tokenizer
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.train_dataloader = None
self.eval_dataloader = None
if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks):
logger.warning(
"The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n"
+ "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of"
+ "callbacks is\n:"
+ self.callback_list
)
def add_callback(self, callback):
cb = callback() if isinstance(callback, type) else callback
cb_class = callback if isinstance(callback, type) else callback.__class__
if cb_class in [c.__class__ for c in self.callbacks]:
logger.warning(
f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current"
+ "list of callbacks is\n:"
+ self.callback_list
)
self.callbacks.append(cb)
def pop_callback(self, callback):
if isinstance(callback, type):
for cb in self.callbacks:
if isinstance(cb, callback):
self.callbacks.remove(cb)
return cb
else:
for cb in self.callbacks:
if cb == callback:
self.callbacks.remove(cb)
return cb
def remove_callback(self, callback):
if isinstance(callback, type):
for cb in self.callbacks:
if isinstance(cb, callback):
self.callbacks.remove(cb)
return
else:
self.callbacks.remove(callback)
@property
def callback_list(self):
return "\n".join(cb.__class__.__name__ for cb in self.callbacks)
def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_init_end", args, state, control)
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_training_stop = False
return self.call_event("on_train_begin", args, state, control)
def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_train_end", args, state, control)
def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_epoch_stop = False
return self.call_event("on_epoch_begin", args, state, control)
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_epoch_end", args, state, control)
def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_log = False
control.should_evaluate = False
control.should_save = False
return self.call_event("on_step_begin", args, state, control)
def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_substep_end", args, state, control)
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_step_end", args, state, control)
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
control.should_evaluate = False
return self.call_event("on_evaluate", args, state, control, metrics=metrics)
def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_save = False
return self.call_event("on_save", args, state, control)
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs):
control.should_log = False
return self.call_event("on_log", args, state, control, logs=logs)
def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_prediction_step", args, state, control)
def call_event(self, event, args, state, control, **kwargs):
for callback in self.callbacks:
result = getattr(callback, event)(
args,
state,
control,
model=self.model,
tokenizer=self.tokenizer,
optimizer=self.optimizer,
lr_scheduler=self.lr_scheduler,
train_dataloader=self.train_dataloader,
eval_dataloader=self.eval_dataloader,
**kwargs,
)
# A Callback can skip the return of `control` if it doesn't change it.
if result is not None:
control = result
return control
class DefaultFlowCallback(TrainerCallback):
"""
A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation
and checkpoints.
"""
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
# Log
if state.global_step == 1 and args.logging_first_step:
control.should_log = True
if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % args.logging_steps == 0:
control.should_log = True
# Evaluate
if args.evaluation_strategy == IntervalStrategy.STEPS and state.global_step % args.eval_steps == 0:
control.should_evaluate = True
# Save
if (
args.save_strategy == IntervalStrategy.STEPS
and args.save_steps > 0
and state.global_step % args.save_steps == 0
):
control.should_save = True
# End training
if state.global_step >= state.max_steps:
control.should_training_stop = True
return control
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
# Log
if args.logging_strategy == IntervalStrategy.EPOCH:
control.should_log = True
# Evaluate
if args.evaluation_strategy == IntervalStrategy.EPOCH:
control.should_evaluate = True
# Save
if args.save_strategy == IntervalStrategy.EPOCH:
control.should_save = True
return control
class ProgressCallback(TrainerCallback):
"""
A [`TrainerCallback`] that displays the progress of training or evaluation.
"""
def __init__(self):
self.training_bar = None
self.prediction_bar = None
def on_train_begin(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar = tqdm(total=state.max_steps)
self.current_step = 0
def on_step_end(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar.update(state.global_step - self.current_step)
self.current_step = state.global_step
def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
if state.is_local_process_zero and isinstance(eval_dataloader.dataset, collections.abc.Sized):
if self.prediction_bar is None:
self.prediction_bar = tqdm(total=len(eval_dataloader), leave=self.training_bar is None)
self.prediction_bar.update(1)
def on_evaluate(self, args, state, control, **kwargs):
if state.is_local_process_zero:
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_log(self, args, state, control, logs=None, **kwargs):
if state.is_local_process_zero and self.training_bar is not None:
_ = logs.pop("total_flos", None)
self.training_bar.write(str(logs))
def on_train_end(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar.close()
self.training_bar = None
class PrinterCallback(TrainerCallback):
"""
A bare [`TrainerCallback`] that just prints the logs.
"""
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop("total_flos", None)
if state.is_local_process_zero:
print(logs)
class EarlyStoppingCallback(TrainerCallback):
"""
A [`TrainerCallback`] that handles early stopping.
Args:
early_stopping_patience (`int`):
Use with `metric_for_best_model` to stop training when the specified metric worsens for
`early_stopping_patience` evaluation calls.
        early_stopping_threshold (`float`, *optional*):
            Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how
            much the specified metric must improve to satisfy early stopping conditions.
This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality
to set best_metric in [`TrainerState`].
"""
def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
self.early_stopping_patience = early_stopping_patience
self.early_stopping_threshold = early_stopping_threshold
# early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0
def check_metric_value(self, args, state, control, metric_value):
# best_metric is set by code for load_best_model
operator = np.greater if args.greater_is_better else np.less
if state.best_metric is None or (
operator(metric_value, state.best_metric)
and abs(metric_value - state.best_metric) > self.early_stopping_threshold
):
self.early_stopping_patience_counter = 0
else:
self.early_stopping_patience_counter += 1
def on_train_begin(self, args, state, control, **kwargs):
assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
assert (
args.metric_for_best_model is not None
), "EarlyStoppingCallback requires metric_for_best_model is defined"
assert (
args.evaluation_strategy != IntervalStrategy.NO
), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"
def on_evaluate(self, args, state, control, metrics, **kwargs):
metric_to_check = args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics.get(metric_to_check)
if metric_value is None:
logger.warning(
f"early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping is disabled"
)
return
self.check_metric_value(args, state, control, metric_value)
if self.early_stopping_patience_counter >= self.early_stopping_patience:
control.should_training_stop = True
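# Illustrative wiring (sketch; assumes a Trainer built with load_best_model_at_end=True,
# metric_for_best_model set, and a step/epoch evaluation strategy, as asserted above):
#   trainer.add_callback(EarlyStoppingCallback(early_stopping_patience=3))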
|
the-stack_0_10259 | # coding: utf-8
""" sick, the spectroscopic inference crank """
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
major, minor1, minor2, release, serial = sys.version_info
open_kwargs = {"encoding": "utf-8"} if major >= 3 else {}
def rf(filename):
with open(filename, **open_kwargs) as fp:
contents = fp.read()
return contents
version_regex = re.compile("__version__ = \"(.*?)\"")
contents = rf(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"sick", "__init__.py"))
version = version_regex.findall(contents)[0]
setup(name="sick",
version=version,
author="Andrew R. Casey",
author_email="[email protected]",
packages=[
"sick",
"sick.models",
"sick.clis",
"sick.specutils"],#"sick.tests"],
url="http://www.github.com/andycasey/sick/",
license="MIT",
description="Infer astrophysical parameters from spectra",
long_description=rf(os.path.join(os.path.dirname(__file__), "README.md")),
install_requires=rf(
os.path.join(os.path.dirname(__file__), "requirements.md")).split("\n"),
entry_points={
"console_scripts": [
"sick-models = sick.clis.models:main",
"sick = sick.clis.run:main"
]
}
)
|
the-stack_0_10260 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Evaluation CLI.
"""
import argparse
import logging
import sys
from collections import defaultdict
from functools import partial
from typing import Callable, Iterable, Dict, List, Tuple, Optional
import numpy as np
from contrib import sacrebleu, rouge
from . import arguments
from . import constants as C
from . import data_io
from . import utils
from .log import setup_main_logger, log_sockeye_version
logger = setup_main_logger(__name__, file_logging=False)
def raw_corpus_bleu(hypotheses: Iterable[str], references: Iterable[str], offset: Optional[float] = 0.01) -> float:
"""
Simple wrapper around sacreBLEU's BLEU without tokenization and smoothing.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:param offset: Smoothing constant.
:return: BLEU score as float between 0 and 1.
"""
return sacrebleu.raw_corpus_bleu(hypotheses, [references], smooth_floor=offset).score / 100.0
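# Illustrative call (sketch): both arguments are equal-length iterables of plain,
# untokenized sentence strings, e.g.
#   score = raw_corpus_bleu(["ein haus"], ["ein haus"])  # float in [0, 1]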
def raw_corpus_chrf(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around sacreBLEU's chrF implementation, without tokenization.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: chrF score as float between 0 and 1.
"""
return sacrebleu.corpus_chrf(hypotheses, references, order=sacrebleu.CHRF_ORDER, beta=sacrebleu.CHRF_BETA,
remove_whitespace=True)
def raw_corpus_rouge1(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around ROUGE-1 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-1 score as float between 0 and 1.
"""
return rouge.rouge_1(hypotheses, references)
def raw_corpus_rouge2(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around ROUGE-2 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-2 score as float between 0 and 1.
"""
return rouge.rouge_2(hypotheses, references)
def raw_corpus_rougel(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
    Simple wrapper around ROUGE-L implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-L score as float between 0 and 1.
"""
return rouge.rouge_l(hypotheses, references)
def main():
    params = argparse.ArgumentParser(description='Evaluate translations by calculating metrics with '
                                                 'respect to a reference set. If multiple hypothesis files are given, '
                                                 'the mean and standard deviation of the metrics are reported.')
arguments.add_evaluate_args(params)
arguments.add_logging_args(params)
args = params.parse_args()
if args.quiet:
logger.setLevel(logging.ERROR)
utils.check_condition(args.offset >= 0, "Offset should be non-negative.")
log_sockeye_version(logger)
logger.info("Command: %s", " ".join(sys.argv))
logger.info("Arguments: %s", args)
references = [' '.join(e) for e in data_io.read_content(args.references)]
all_hypotheses = [[h.strip() for h in hypotheses] for hypotheses in args.hypotheses]
if not args.not_strict:
for hypotheses in all_hypotheses:
utils.check_condition(len(hypotheses) == len(references),
"Number of hypotheses (%d) and references (%d) does not match." % (len(hypotheses),
len(references)))
logger.info("%d hypothesis set(s) | %d hypotheses | %d references",
len(all_hypotheses), len(all_hypotheses[0]), len(references))
metric_info = ["%s\t(s_opt)" % name for name in args.metrics]
logger.info("\t".join(metric_info))
metrics = [] # type: List[Tuple[str, Callable]]
for name in args.metrics:
if name == C.BLEU:
func = partial(raw_corpus_bleu, offset=args.offset)
elif name == C.CHRF:
func = raw_corpus_chrf
elif name == C.ROUGE1:
func = raw_corpus_rouge1
elif name == C.ROUGE2:
func = raw_corpus_rouge2
elif name == C.ROUGEL:
func = raw_corpus_rougel
else:
raise ValueError("Unknown metric %s." % name)
metrics.append((name, func))
if not args.sentence:
scores = defaultdict(list) # type: Dict[str, List[float]]
for hypotheses in all_hypotheses:
for name, metric in metrics:
scores[name].append(metric(hypotheses, references))
_print_mean_std_score(metrics, scores)
else:
for hypotheses in all_hypotheses:
for h, r in zip(hypotheses, references):
scores = defaultdict(list) # type: Dict[str, List[float]]
for name, metric in metrics:
scores[name].append(metric([h], [r]))
_print_mean_std_score(metrics, scores)
def _print_mean_std_score(metrics: List[Tuple[str, Callable]], scores: Dict[str, List[float]]):
scores_mean_std = [] # type: List[str]
for name, _ in metrics:
if len(scores[name]) > 1:
score_mean = np.asscalar(np.mean(scores[name]))
score_std = np.asscalar(np.std(scores[name], ddof=1))
scores_mean_std.append("%.3f\t%.3f" % (score_mean, score_std))
else:
score = scores[name][0]
scores_mean_std.append("%.3f\t(-)" % score)
print("\t".join(scores_mean_std))
if __name__ == '__main__':
main()
|
the-stack_0_10262 | from functools import partial
from collections.abc import Iterable
from collections import defaultdict
from PySide2 import QtCore
from PySide2.QtWidgets import (QWidget, QPushButton, QHBoxLayout, QVBoxLayout,
QGroupBox, QFormLayout, QLabel, QLineEdit,
QComboBox, QSpinBox, QDoubleSpinBox, QSizePolicy,
QCheckBox, QDockWidget, QScrollArea, QListWidget,
QListWidgetItem, QTreeWidget, QTreeWidgetItem)
from matplotlib import cm as mcolormaps
import numpy as np
import openmc
from .custom_widgets import HorizontalLine, Expander
from .scientific_spin_box import ScientificDoubleSpinBox
from .plotmodel import (_SCORE_UNITS, _TALLY_VALUES,
_REACTION_UNITS, _SPATIAL_FILTERS)
class PlotterDock(QDockWidget):
"""
Dock widget with common settings for the plotting application
"""
def __init__(self, model, font_metric, parent=None):
super().__init__(parent)
self.model = model
self.font_metric = font_metric
self.main_window = parent
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
class DomainDock(PlotterDock):
"""
Domain options dock
"""
def __init__(self, model, font_metric, parent=None):
super().__init__(model, font_metric, parent)
self.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea)
# Create Controls
self._createOriginBox()
self._createOptionsBox()
self._createResolutionBox()
# Create submit button
self.applyButton = QPushButton("Apply Changes")
# Mac bug fix
self.applyButton.setMinimumHeight(self.font_metric.height() * 1.6)
self.applyButton.clicked.connect(self.main_window.applyChanges)
# Create Zoom box
self.zoomBox = QSpinBox()
self.zoomBox.setSuffix(' %')
self.zoomBox.setRange(25, 2000)
self.zoomBox.setValue(100)
self.zoomBox.setSingleStep(25)
self.zoomBox.valueChanged.connect(self.main_window.editZoom)
self.zoomLayout = QHBoxLayout()
self.zoomLayout.addWidget(QLabel('Zoom:'))
self.zoomLayout.addWidget(self.zoomBox)
self.zoomLayout.setContentsMargins(0, 0, 0, 0)
self.zoomWidget = QWidget()
self.zoomWidget.setLayout(self.zoomLayout)
# Create Layout
self.dockLayout = QVBoxLayout()
self.dockLayout.addWidget(QLabel("Geometry/Properties"))
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addWidget(self.originGroupBox)
self.dockLayout.addWidget(self.optionsGroupBox)
self.dockLayout.addWidget(self.resGroupBox)
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addWidget(self.zoomWidget)
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addStretch()
self.dockLayout.addWidget(self.applyButton)
self.dockLayout.addWidget(HorizontalLine())
self.optionsWidget = QWidget()
self.optionsWidget.setLayout(self.dockLayout)
self.setWidget(self.optionsWidget)
def _createOriginBox(self):
# X Origin
self.xOrBox = QDoubleSpinBox()
self.xOrBox.setDecimals(9)
self.xOrBox.setRange(-99999, 99999)
xbox_connector = partial(self.main_window.editSingleOrigin,
dimension=0)
self.xOrBox.valueChanged.connect(xbox_connector)
# Y Origin
self.yOrBox = QDoubleSpinBox()
self.yOrBox.setDecimals(9)
self.yOrBox.setRange(-99999, 99999)
ybox_connector = partial(self.main_window.editSingleOrigin,
dimension=1)
self.yOrBox.valueChanged.connect(ybox_connector)
# Z Origin
self.zOrBox = QDoubleSpinBox()
self.zOrBox.setDecimals(9)
self.zOrBox.setRange(-99999, 99999)
zbox_connector = partial(self.main_window.editSingleOrigin,
dimension=2)
self.zOrBox.valueChanged.connect(zbox_connector)
# Origin Form Layout
self.orLayout = QFormLayout()
self.orLayout.addRow('X:', self.xOrBox)
self.orLayout.addRow('Y:', self.yOrBox)
self.orLayout.addRow('Z:', self.zOrBox)
self.orLayout.setLabelAlignment(QtCore.Qt.AlignLeft)
self.orLayout.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
# Origin Group Box
self.originGroupBox = QGroupBox('Origin')
self.originGroupBox.setLayout(self.orLayout)
def _createOptionsBox(self):
# Width
self.widthBox = QDoubleSpinBox(self)
self.widthBox.setRange(.1, 99999)
self.widthBox.setDecimals(9)
self.widthBox.valueChanged.connect(self.main_window.editWidth)
# Height
self.heightBox = QDoubleSpinBox(self)
self.heightBox.setRange(.1, 99999)
self.heightBox.setDecimals(9)
self.heightBox.valueChanged.connect(self.main_window.editHeight)
# ColorBy
self.colorbyBox = QComboBox(self)
self.colorbyBox.addItem("material")
self.colorbyBox.addItem("cell")
self.colorbyBox.addItem("temperature")
self.colorbyBox.addItem("density")
self.colorbyBox.currentTextChanged[str].connect(
self.main_window.editColorBy)
# Universe level (applies to cell coloring only)
self.universeLevelBox = QComboBox(self)
self.universeLevelBox.addItem('all')
for i in range(self.model.max_universe_levels):
self.universeLevelBox.addItem(str(i))
self.universeLevelBox.currentTextChanged[str].connect(
self.main_window.editUniverseLevel)
# Alpha
self.domainAlphaBox = QDoubleSpinBox(self)
self.domainAlphaBox.setValue(self.model.activeView.domainAlpha)
self.domainAlphaBox.setSingleStep(0.05)
self.domainAlphaBox.setDecimals(2)
self.domainAlphaBox.setRange(0.0, 1.0)
self.domainAlphaBox.valueChanged.connect(self.main_window.editPlotAlpha)
# Visibility
self.visibilityBox = QCheckBox(self)
self.visibilityBox.stateChanged.connect(
self.main_window.editPlotVisibility)
# Outlines
self.outlinesBox = QCheckBox(self)
self.outlinesBox.stateChanged.connect(self.main_window.toggleOutlines)
# Basis
self.basisBox = QComboBox(self)
self.basisBox.addItem("xy")
self.basisBox.addItem("xz")
self.basisBox.addItem("yz")
self.basisBox.currentTextChanged.connect(self.main_window.editBasis)
# Advanced Color Options
self.colorOptionsButton = QPushButton('Color Options...')
self.colorOptionsButton.setMinimumHeight(self.font_metric.height() * 1.6)
self.colorOptionsButton.clicked.connect(self.main_window.showColorDialog)
# Options Form Layout
self.opLayout = QFormLayout()
self.opLayout.addRow('Width:', self.widthBox)
self.opLayout.addRow('Height:', self.heightBox)
self.opLayout.addRow('Basis:', self.basisBox)
self.opLayout.addRow('Color By:', self.colorbyBox)
self.opLayout.addRow('Universe Level:', self.universeLevelBox)
self.opLayout.addRow('Plot alpha:', self.domainAlphaBox)
self.opLayout.addRow('Visible:', self.visibilityBox)
self.opLayout.addRow('Outlines:', self.outlinesBox)
self.opLayout.addRow(self.colorOptionsButton)
self.opLayout.setLabelAlignment(QtCore.Qt.AlignLeft)
self.opLayout.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
# Options Group Box
self.optionsGroupBox = QGroupBox('Options')
self.optionsGroupBox.setLayout(self.opLayout)
def _createResolutionBox(self):
# Horizontal Resolution
self.hResBox = QSpinBox(self)
self.hResBox.setRange(1, 99999)
self.hResBox.setSingleStep(25)
self.hResBox.setSuffix(' px')
self.hResBox.valueChanged.connect(self.main_window.editHRes)
# Vertical Resolution
self.vResLabel = QLabel('Pixel Height:')
self.vResBox = QSpinBox(self)
self.vResBox.setRange(1, 99999)
self.vResBox.setSingleStep(25)
self.vResBox.setSuffix(' px')
self.vResBox.valueChanged.connect(self.main_window.editVRes)
# Ratio checkbox
self.ratioCheck = QCheckBox("Fixed Aspect Ratio", self)
self.ratioCheck.stateChanged.connect(self.main_window.toggleAspectLock)
# Resolution Form Layout
self.resLayout = QFormLayout()
self.resLayout.addRow(self.ratioCheck)
self.resLayout.addRow('Pixel Width:', self.hResBox)
self.resLayout.addRow(self.vResLabel, self.vResBox)
self.resLayout.setLabelAlignment(QtCore.Qt.AlignLeft)
self.resLayout.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
# Resolution Group Box
self.resGroupBox = QGroupBox("Resolution")
self.resGroupBox.setLayout(self.resLayout)
def updateDock(self):
self.updateOrigin()
self.updateWidth()
self.updateHeight()
self.updateColorBy()
self.updateUniverseLevel()
self.updatePlotAlpha()
self.updatePlotVisibility()
self.updateOutlines()
self.updateBasis()
self.updateAspectLock()
self.updateHRes()
self.updateVRes()
def updateOrigin(self):
self.xOrBox.setValue(self.model.activeView.origin[0])
self.yOrBox.setValue(self.model.activeView.origin[1])
self.zOrBox.setValue(self.model.activeView.origin[2])
def updateWidth(self):
self.widthBox.setValue(self.model.activeView.width)
def updateHeight(self):
self.heightBox.setValue(self.model.activeView.height)
def updateColorBy(self):
self.colorbyBox.setCurrentText(self.model.activeView.colorby)
if self.model.activeView.colorby != 'cell':
self.universeLevelBox.setEnabled(False)
else:
self.universeLevelBox.setEnabled(True)
def updateUniverseLevel(self):
self.universeLevelBox.setCurrentIndex(self.model.activeView.level + 1)
def updatePlotAlpha(self):
self.domainAlphaBox.setValue(self.model.activeView.domainAlpha)
def updatePlotVisibility(self):
self.visibilityBox.setChecked(self.model.activeView.domainVisible)
def updateOutlines(self):
self.outlinesBox.setChecked(self.model.activeView.outlines)
def updateBasis(self):
self.basisBox.setCurrentText(self.model.activeView.basis)
def updateAspectLock(self):
aspect_lock = bool(self.model.activeView.aspectLock)
self.ratioCheck.setChecked(aspect_lock)
self.vResBox.setDisabled(aspect_lock)
self.vResLabel.setDisabled(aspect_lock)
def updateHRes(self):
self.hResBox.setValue(self.model.activeView.h_res)
def updateVRes(self):
self.vResBox.setValue(self.model.activeView.v_res)
def revertToCurrent(self):
cv = self.model.currentView
self.xOrBox.setValue(cv.origin[0])
self.yOrBox.setValue(cv.origin[1])
self.zOrBox.setValue(cv.origin[2])
self.widthBox.setValue(cv.width)
self.heightBox.setValue(cv.height)
def resizeEvent(self, event):
self.main_window.resizeEvent(event)
hideEvent = showEvent = moveEvent = resizeEvent
class TallyDock(PlotterDock):
def __init__(self, model, font_metric, parent=None):
super().__init__(model, font_metric, parent)
self.setAllowedAreas(QtCore.Qt.RightDockWidgetArea)
# Dock maps for tally information
self.tally_map = {}
self.filter_map = {}
self.score_map = {}
self.nuclide_map = {}
# Tally selector
self.tallySelectorLayout = QFormLayout()
self.tallySelector = QComboBox(self)
self.tallySelector.currentTextChanged[str].connect(
self.main_window.editSelectedTally)
self.tallySelectorLayout.addRow(self.tallySelector)
self.tallySelectorLayout.setLabelAlignment(QtCore.Qt.AlignLeft)
self.tallySelectorLayout.setFieldGrowthPolicy(
QFormLayout.AllNonFixedFieldsGrow)
# Add selector to its own box
self.tallyGroupBox = QGroupBox('Selected Tally')
self.tallyGroupBox.setLayout(self.tallySelectorLayout)
# Create submit button
self.applyButton = QPushButton("Apply Changes")
self.applyButton.setMinimumHeight(self.font_metric.height() * 1.6)
self.applyButton.clicked.connect(self.main_window.applyChanges)
# Color options section
self.tallyColorForm = ColorForm(self.model, self.main_window, 'tally')
self.scoresGroupBox = Expander(title="Scores:")
self.scoresListWidget = QListWidget()
self.nuclidesListWidget = QListWidget()
# Main layout
self.dockLayout = QVBoxLayout()
self.dockLayout.addWidget(QLabel("Tallies"))
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addWidget(self.tallyGroupBox)
self.dockLayout.addStretch()
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addWidget(self.tallyColorForm)
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addWidget(self.applyButton)
# Create widget for dock and apply main layout
self.scroll = QScrollArea()
self.scroll.setWidgetResizable(True)
self.widget = QWidget()
self.widget.setLayout(self.dockLayout)
self.scroll.setWidget(self.widget)
self.setWidget(self.scroll)
def _createFilterTree(self, spatial_filters):
av = self.model.activeView
tally = self.model.statepoint.tallies[av.selectedTally]
filters = tally.filters
# create a tree for the filters
self.treeLayout = QVBoxLayout()
self.filterTree = QTreeWidget()
self.treeLayout.addWidget(self.filterTree)
self.treeExpander = Expander("Filters:", layout=self.treeLayout)
self.treeExpander.expand() # start with filters expanded
header = QTreeWidgetItem(["Filters"])
self.filterTree.setHeaderItem(header)
self.filterTree.setItemHidden(header, True)
self.filterTree.setColumnCount(1)
self.filterTree.itemChanged.connect(self.updateFilters)
self.filter_map = {}
self.bin_map = {}
for tally_filter in filters:
filter_label = str(type(tally_filter)).split(".")[-1][:-2]
filter_item = QTreeWidgetItem(self.filterTree, (filter_label,))
self.filter_map[tally_filter] = filter_item
# make checkable
if not spatial_filters:
filter_item.setFlags(QtCore.Qt.ItemIsUserCheckable)
filter_item.setToolTip(0, "Only tallies with spatial filters are viewable.")
else:
filter_item.setFlags(filter_item.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsUserCheckable)
filter_item.setCheckState(0, QtCore.Qt.Unchecked)
# all mesh bins are selected by default and not shown in the dock
if isinstance(tally_filter, openmc.MeshFilter):
filter_item.setCheckState(0, QtCore.Qt.Checked)
filter_item.setFlags(QtCore.Qt.ItemIsUserCheckable)
filter_item.setToolTip(0, "All Mesh bins are selected automatically")
continue
def _bin_sort_val(bin):
if isinstance(bin, Iterable) and all([isinstance(val, float) for val in bin]):
return np.sum(bin)
else:
return bin
for bin in sorted(tally_filter.bins, key=_bin_sort_val):
item = QTreeWidgetItem(filter_item, [str(bin),])
if not spatial_filters:
item.setFlags(QtCore.Qt.ItemIsUserCheckable)
item.setToolTip(0, "Only tallies with spatial filters are viewable.")
else:
item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
item.setCheckState(0, QtCore.Qt.Unchecked)
bin = bin if not isinstance(bin, Iterable) else tuple(bin)
self.bin_map[tally_filter, bin] = item
# start with all filters selected if spatial filters are present
if spatial_filters:
filter_item.setCheckState(0, QtCore.Qt.Checked)
def selectFromModel(self):
cv = self.model.currentView
        self.selectTally(cv.selectedTally)
def selectTally(self, tally_label=None):
# using active view to populate tally options live
av = self.model.activeView
# reset form layout
for i in reversed(range(self.tallySelectorLayout.count())):
self.tallySelectorLayout.itemAt(i).widget().setParent(None)
# always re-add the tally selector to the layout
self.tallySelectorLayout.addRow(self.tallySelector)
self.tallySelectorLayout.addRow(HorizontalLine())
if tally_label is None or tally_label == "None" or tally_label == "":
av.selectedTally = None
self.score_map = None
self.nuclide_map = None
self.filter_map = None
av.tallyValue = "Mean"
else:
# get the tally
tally = self.model.statepoint.tallies[av.selectedTally]
# populate filters
filter_types = {type(f) for f in tally.filters}
spatial_filters = bool(filter_types.intersection(_SPATIAL_FILTERS))
if not spatial_filters:
self.filter_description = QLabel("(No Spatial Filters)")
self.tallySelectorLayout.addRow(self.filter_description)
self._createFilterTree(spatial_filters)
self.tallySelectorLayout.addRow(self.treeExpander)
self.tallySelectorLayout.addRow(HorizontalLine())
# value selection
self.tallySelectorLayout.addRow(QLabel("Value:"))
self.valueBox = QComboBox(self)
self.values = tuple(_TALLY_VALUES.keys())
for value in self.values:
self.valueBox.addItem(value)
self.tallySelectorLayout.addRow(self.valueBox)
self.valueBox.currentTextChanged[str].connect(
self.main_window.editTallyValue)
self.updateTallyValue()
if not spatial_filters:
self.valueBox.setEnabled(False)
self.valueBox.setToolTip("Only tallies with spatial filters are viewable.")
# scores
self.score_map = {}
self.scoresListWidget.itemClicked.connect(
self.main_window.updateScores)
self.score_map.clear()
self.scoresListWidget.clear()
sorted_scores = sorted(tally.scores)
# always put total first if present
if 'total' in sorted_scores:
idx = sorted_scores.index('total')
sorted_scores.insert(0, sorted_scores.pop(idx))
for score in sorted_scores:
ql = QListWidgetItem()
ql.setText(score.capitalize())
ql.setCheckState(QtCore.Qt.Unchecked)
if not spatial_filters:
ql.setFlags(QtCore.Qt.ItemIsUserCheckable)
else:
ql.setFlags(ql.flags() | QtCore.Qt.ItemIsUserCheckable)
ql.setFlags(ql.flags() & ~QtCore.Qt.ItemIsSelectable)
self.score_map[score] = ql
self.scoresListWidget.addItem(ql)
# select the first score item by default
for item in self.score_map.values():
item.setCheckState(QtCore.Qt.Checked)
break
self.updateScores()
self.scoresGroupBoxLayout = QVBoxLayout()
self.scoresGroupBoxLayout.addWidget(self.scoresListWidget)
self.scoresGroupBox = Expander("Scores:", layout=self.scoresGroupBoxLayout)
self.tallySelectorLayout.addRow(self.scoresGroupBox)
# nuclides
self.nuclide_map = {}
self.nuclidesListWidget.itemClicked.connect(self.main_window.updateNuclides)
self.nuclide_map.clear()
self.nuclidesListWidget.clear()
sorted_nuclides = sorted(tally.nuclides)
# always put total at the top
if 'total' in sorted_nuclides:
idx = sorted_nuclides.index('total')
sorted_nuclides.insert(0, sorted_nuclides.pop(idx))
for nuclide in sorted_nuclides:
ql = QListWidgetItem()
ql.setText(nuclide.capitalize())
ql.setCheckState(QtCore.Qt.Unchecked)
if not spatial_filters:
ql.setFlags(QtCore.Qt.ItemIsUserCheckable)
else:
ql.setFlags(ql.flags() | QtCore.Qt.ItemIsUserCheckable)
ql.setFlags(ql.flags() & ~QtCore.Qt.ItemIsSelectable)
self.nuclide_map[nuclide] = ql
self.nuclidesListWidget.addItem(ql)
# select the first nuclide item by default
for item in self.nuclide_map.values():
item.setCheckState(QtCore.Qt.Checked)
break
self.updateNuclides()
self.nuclidesGroupBoxLayout = QVBoxLayout()
self.nuclidesGroupBoxLayout.addWidget(self.nuclidesListWidget)
self.nuclidesGroupBox = Expander("Nuclides:", layout=self.nuclidesGroupBoxLayout)
self.tallySelectorLayout.addRow(self.nuclidesGroupBox)
def updateMinMax(self):
self.tallyColorForm.updateMinMax()
def updateTallyValue(self):
cv = self.model.currentView
idx = self.valueBox.findText(cv.tallyValue)
self.valueBox.setCurrentIndex(idx)
def updateSelectedTally(self):
cv = self.model.currentView
idx = 0
if cv.selectedTally:
idx = self.tallySelector.findData(cv.selectedTally)
self.tallySelector.setCurrentIndex(idx)
def updateFilters(self):
applied_filters = defaultdict(tuple)
for f, f_item in self.filter_map.items():
if type(f) == openmc.MeshFilter:
continue
filter_checked = f_item.checkState(0)
if filter_checked != QtCore.Qt.Unchecked:
selected_bins = []
for idx, b in enumerate(f.bins):
b = b if not isinstance(b, Iterable) else tuple(b)
bin_checked = self.bin_map[(f, b)].checkState(0)
if bin_checked == QtCore.Qt.Checked:
selected_bins.append(idx)
applied_filters[f] = tuple(selected_bins)
self.model.appliedFilters = applied_filters
def updateScores(self):
applied_scores = []
for score, score_box in self.score_map.items():
if score_box.checkState() == QtCore.Qt.CheckState.Checked:
applied_scores.append(score)
self.model.appliedScores = tuple(applied_scores)
if not applied_scores:
# if no scores are selected, enable all scores again
for score, score_box in self.score_map.items():
sunits = _SCORE_UNITS.get(score, _REACTION_UNITS)
empty_item = QListWidgetItem()
score_box.setFlags(empty_item.flags() | QtCore.Qt.ItemIsUserCheckable)
score_box.setFlags(empty_item.flags() & ~QtCore.Qt.ItemIsSelectable)
elif 'total' in applied_scores:
self.model.appliedScores = ('total',)
# if total is selected, disable all other scores
for score, score_box in self.score_map.items():
if score != 'total':
score_box.setFlags(QtCore.Qt.ItemIsUserCheckable)
score_box.setToolTip("De-select 'total' to enable other scores")
else:
# get units of applied scores
selected_units = _SCORE_UNITS.get(applied_scores[0], _REACTION_UNITS)
# disable scores with incompatible units
for score, score_box in self.score_map.items():
sunits = _SCORE_UNITS.get(score, _REACTION_UNITS)
if sunits != selected_units:
score_box.setFlags(QtCore.Qt.ItemIsUserCheckable)
score_box.setToolTip("Score is incompatible with currently selected scores")
else:
score_box.setFlags(score_box.flags() | QtCore.Qt.ItemIsUserCheckable)
score_box.setFlags(score_box.flags() & ~QtCore.Qt.ItemIsSelectable)
def updateNuclides(self):
applied_nuclides = []
for nuclide, nuclide_box in self.nuclide_map.items():
if nuclide_box.checkState() == QtCore.Qt.CheckState.Checked:
applied_nuclides.append(nuclide)
self.model.appliedNuclides = tuple(applied_nuclides)
if 'total' in applied_nuclides:
self.model.appliedNuclides = ['total',]
for nuclide, nuclide_box in self.nuclide_map.items():
if nuclide != 'total':
nuclide_box.setFlags(QtCore.Qt.ItemIsUserCheckable)
nuclide_box.setToolTip("De-select 'total' to enable other nuclides")
elif not applied_nuclides:
# if no nuclides are selected, enable all nuclides again
for nuclide, nuclide_box in self.nuclide_map.items():
empty_item = QListWidgetItem()
nuclide_box.setFlags(empty_item.flags() | QtCore.Qt.ItemIsUserCheckable)
nuclide_box.setFlags(empty_item.flags() & ~QtCore.Qt.ItemIsSelectable)
def update(self):
# update the color form
self.tallyColorForm.update()
if self.model.statepoint:
self.tallySelector.clear()
self.tallySelector.setEnabled(True)
self.tallySelector.addItem("None")
for idx, tally in enumerate(self.model.statepoint.tallies.values()):
if tally.name == "":
self.tallySelector.addItem('Tally {}'.format(tally.id), userData=tally.id)
else:
self.tallySelector.addItem('Tally {} "{}"'.format(tally.id, tally.name), userData=tally.id)
self.tally_map[idx] = tally
self.updateSelectedTally()
self.updateMinMax()
else:
self.tallySelector.clear()
self.tallySelector.setDisabled(True)
class ColorForm(QWidget):
"""
Class for handling a field with a colormap, alpha, and visibility
Attributes
----------
model : PlotModel
The model instance used when updating information on the form.
    colormapBox : QComboBox
        Selector holding the name of the matplotlib colormap being used
    visibilityBox : QCheckBox
        Indicator for whether or not the field should be visible
    alphaBox : QDoubleSpinBox
        Holds the alpha value for the displayed field data
    dataIndicatorCheckBox : QCheckBox
        Indicates whether or not the data indicator will appear on the colorbar
userMinMaxBox : QCheckBox
Indicates whether or not the user defined values in the min and max
will be used to set the bounds of the colorbar.
maxBox : ScientificDoubleSpinBox
Max value of the colorbar. If the userMinMaxBox is checked, this will be
the user's input. If the userMinMaxBox is not checked, this box will
hold the max value of the visible data.
minBox : ScientificDoubleSpinBox
Min value of the colorbar. If the userMinMaxBox is checked, this will be
the user's input. If the userMinMaxBox is not checked, this box will
        hold the min value of the visible data.
scaleBox : QCheckBox
Indicates whether or not the data is displayed on a log or linear
scale
maskZeroBox : QCheckBox
Indicates whether or not values equal to zero are displayed
clipDataBox : QCheckBox
Indicates whether or not values outside the min/max are displayed
contoursBox : QCheckBox
        Indicates whether or not data is displayed as contours
contourLevelsLine : QLineEdit
Controls the contours of the data. If this line contains a single
integer, that number of levels is used to display the data. If a
comma-separated set of values is entered, those values will be used as
levels in the contour plot.
"""
def __init__(self, model, main_window, field, colormaps=None):
super().__init__()
self.model = model
self.main_window = main_window
self.field = field
self.layout = QFormLayout()
# Visibility check box
self.visibilityBox = QCheckBox()
visible_connector = partial(main_window.toggleTallyVisibility)
self.visibilityBox.stateChanged.connect(visible_connector)
# Alpha value
self.alphaBox = QDoubleSpinBox()
self.alphaBox.setDecimals(2)
self.alphaBox.setRange(0, 1)
self.alphaBox.setSingleStep(0.05)
alpha_connector = partial(main_window.editTallyAlpha)
self.alphaBox.valueChanged.connect(alpha_connector)
# Color map selector
self.colormapBox = QComboBox()
if colormaps is None:
colormaps = sorted(m for m in mcolormaps.datad if not m.endswith("_r"))
for colormap in colormaps:
self.colormapBox.addItem(colormap)
cmap_connector = partial(main_window.editTallyDataColormap)
self.colormapBox.currentTextChanged[str].connect(cmap_connector)
# Data indicator line check box
self.dataIndicatorCheckBox = QCheckBox()
data_indicator_connector = partial(main_window.toggleTallyDataIndicator)
self.dataIndicatorCheckBox.stateChanged.connect(data_indicator_connector)
# User specified min/max check box
self.userMinMaxBox = QCheckBox()
minmax_connector = partial(main_window.toggleTallyDataUserMinMax)
self.userMinMaxBox.stateChanged.connect(minmax_connector)
# Data min spin box
self.minBox = ScientificDoubleSpinBox()
self.minBox.setMinimum(0.0)
min_connector = partial(main_window.editTallyDataMin)
self.minBox.valueChanged.connect(min_connector)
# Data max spin box
self.maxBox = ScientificDoubleSpinBox()
self.maxBox.setMinimum(0.0)
max_connector = partial(main_window.editTallyDataMax)
self.maxBox.valueChanged.connect(max_connector)
# Linear/Log scaling check box
self.scaleBox = QCheckBox()
scale_connector = partial(main_window.toggleTallyLogScale)
self.scaleBox.stateChanged.connect(scale_connector)
# Masking of zero values check box
self.maskZeroBox = QCheckBox()
zero_connector = partial(main_window.toggleTallyMaskZero)
self.maskZeroBox.stateChanged.connect(zero_connector)
# Clip data to min/max check box
self.clipDataBox = QCheckBox()
clip_connector = partial(main_window.toggleTallyDataClip)
self.clipDataBox.stateChanged.connect(clip_connector)
# Display data as contour plot check box
self.contoursBox = QCheckBox()
self.contoursBox.stateChanged.connect(main_window.toggleTallyContours)
self.contourLevelsLine = QLineEdit()
self.contourLevelsLine.textChanged.connect(
main_window.editTallyContourLevels)
# Organize widgets on layout
self.layout.addRow("Visible:", self.visibilityBox)
self.layout.addRow("Alpha: ", self.alphaBox)
self.layout.addRow("Colormap: ", self.colormapBox)
self.layout.addRow("Data Indicator: ", self.dataIndicatorCheckBox)
self.layout.addRow("Custom Min/Max: ", self.userMinMaxBox)
self.layout.addRow("Min: ", self.minBox)
self.layout.addRow("Max: ", self.maxBox)
self.layout.addRow("Log Scale: ", self.scaleBox)
self.layout.addRow("Clip Data: ", self.clipDataBox)
self.layout.addRow("Mask Zeros: ", self.maskZeroBox)
self.layout.addRow("Contours: ", self.contoursBox)
self.layout.addRow("Contour Levels:", self.contourLevelsLine)
self.setLayout(self.layout)
def updateTallyContours(self):
cv = self.model.currentView
self.contoursBox.setChecked(cv.tallyContours)
self.contourLevelsLine.setText(cv.tallyContourLevels)
def updateDataIndicator(self):
cv = self.model.currentView
self.dataIndicatorCheckBox.setChecked(cv.tallyDataIndicator)
def setMinMaxEnabled(self, enable):
enable = bool(enable)
self.minBox.setEnabled(enable)
self.maxBox.setEnabled(enable)
def updateMinMax(self):
cv = self.model.currentView
self.minBox.setValue(cv.tallyDataMin)
self.maxBox.setValue(cv.tallyDataMax)
self.setMinMaxEnabled(cv.tallyDataUserMinMax)
def updateTallyVisibility(self):
cv = self.model.currentView
self.visibilityBox.setChecked(cv.tallyDataVisible)
def updateMaskZeros(self):
cv = self.model.currentView
self.maskZeroBox.setChecked(cv.tallyMaskZeroValues)
def updateDataClip(self):
cv = self.model.currentView
self.clipDataBox.setChecked(cv.clipTallyData)
def update(self):
cv = self.model.currentView
# set colormap value in selector
cmap = cv.tallyDataColormap
idx = self.colormapBox.findText(cmap, QtCore.Qt.MatchFixedString)
self.colormapBox.setCurrentIndex(idx)
self.alphaBox.setValue(cv.tallyDataAlpha)
self.visibilityBox.setChecked(cv.tallyDataVisible)
self.userMinMaxBox.setChecked(cv.tallyDataUserMinMax)
self.scaleBox.setChecked(cv.tallyDataLogScale)
self.updateMinMax()
self.updateMaskZeros()
self.updateDataClip()
self.updateDataIndicator()
self.updateTallyContours()
|
the-stack_0_10264 | PROJECT_NAME = 'Olympus Programming'
DEBUG_PROJECT_NAME = 'Coursework'
IP = '185.255.132.221'
PORT = '80'
WORKING_DIRECTORY = '/root/project' # Only for server
LOCAL_WORKING_DIRECTORY = 'G://Projects/Coursework' # On my pc
solution_lang = {
'GNU GCC C99': 'c',
'GNU G++ 17': 'cpp',
# 'Kotlin': 'kt',
'Python 3': 'py',
'PyPy': 'pypy',
# 'Ruby 2.7': 'rb',
}
verdict = {
    True: 'Правильное решение',  # "Correct solution"
    # Codes of status of task checking:
    # WANNA ENUM...
    # But I am too lazy to use it
    'process': 'Выполняется проверка',  # "Checking in progress"
}
valid_image_formats = [
'png',
'jpg',
'jpeg',
]
annotation = {
'task_manager': {
'package': 'It must be a class inherited from the class SolutionCaseBase',
'task': 'It must be a class inherited from the class TaskBase',
'tests': 'It must be a class inherited from the class TestBase',
}
}
|
the-stack_0_10268 | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('index.urls')),
path('admin/', admin.site.urls),
path('users/', include('accounts.urls')),
path('board/', include('board.urls')),
path('posts/', include('posts.urls')),
path('search/', include('search.urls')),
# path('rest-auth/', include('rest_auth.urls')),
# path('rest-auth/signup/', include('rest_auth.registration.urls')),
]
|
the-stack_0_10272 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from __future__ import absolute_import
import pytest
# Allow everything in there to access the DB
pytestmark = pytest.mark.django_db
from django.db import IntegrityError
from django.db.models import ProtectedError
from django.conf import settings
from django.core.exceptions import ValidationError as DjangoValidationError
import ipaddress
import logging
from nsot import exc, models
from .fixtures import admin_user, circuit, device, site, user, transactional_db
def test_creation(device):
"""Test basic Circuit creation."""
site = device.site
# Create a network for interface assignments
network = models.Network.objects.create(
cidr='10.32.0.0/24', site=site,
)
# A-side device/interface and child interface
device_a = device
iface_a = models.Interface.objects.create(
device=device_a, name='ae0', addresses=['10.32.0.1/32']
)
child_iface_a = models.Interface.objects.create(
device=device_a, name='ae0.0', addresses=['10.32.0.3/32'], parent=iface_a
)
# Z-side device/interface and child interface
device_z = models.Device.objects.create(
hostname='foo-bar2', site=site
)
iface_z = models.Interface.objects.create(
device=device_z, name='ae0', addresses=['10.32.0.2/32']
)
child_iface_z = models.Interface.objects.create(
device=device_z, name='ae0.0', addresses=['10.32.0.4/32'], parent=iface_z
)
# Create the circuits
circuit = models.Circuit.objects.create(
endpoint_a=iface_a, endpoint_z=iface_z
)
circuit_for_child_ifaces = models.Circuit.objects.create(
endpoint_a=child_iface_a, endpoint_z=child_iface_z
)
# Interface inherits endpoint_a's site
assert circuit.site == iface_a.site
# Name should be slugs of A/Z interfaces joined by '_'
expected_name_t = '{endpoint_a}_{endpoint_z}'
expected_name = expected_name_t.format(
endpoint_a=iface_a, endpoint_z=iface_z
)
assert circuit.name == expected_name
# Name slug should be the slugified version of the name
assert circuit.name_slug == expected_name.replace('/', '_')
# Assert property values
assert circuit.interfaces == [iface_a, iface_z]
assert [str(a) for a in circuit.addresses] == ['10.32.0.1/32', '10.32.0.3/32', \
'10.32.0.2/32', '10.32.0.4/32']
assert circuit.devices == [device_a, device_z]
# Try to create another circuit w/ the same interfaces (expecting Django
# validation error)
with pytest.raises(DjangoValidationError):
c2 = models.Circuit.objects.create(
endpoint_a=iface_a, endpoint_z=iface_z
)
# ... Or with A/Z sides swapped (expecting DRF validation error).
with pytest.raises(exc.ValidationError):
c2 = models.Circuit.objects.create(
endpoint_a=iface_z, endpoint_z=iface_a
)
def test_attributes(circuit):
"""Test that attributes work as expected."""
models.Attribute.objects.create(
site=circuit.site, resource_name='Circuit', name='cid'
)
models.Attribute.objects.create(
site=circuit.site, resource_name='Circuit', name='vendor'
)
# Set attributes
attrs = {'cid': 'abc123', 'vendor': 'acme'}
circuit.set_attributes(attrs)
assert circuit.get_attributes() == attrs
# Test a sinmple set query just for kicks.
query_result = models.Circuit.objects.set_query('cid=abc123 vendor=acme')
assert list(query_result) == [circuit]
# Verify that we can zero out attributes
circuit.set_attributes({})
assert circuit.get_attributes() == {}
# And make sure no bogus attributes can be set.
with pytest.raises(exc.ValidationError):
circuit.set_attributes(None)
with pytest.raises(exc.ValidationError):
circuit.set_attributes({0: 'value'})
with pytest.raises(exc.ValidationError):
circuit.set_attributes({'key': 0})
with pytest.raises(exc.ValidationError):
circuit.set_attributes({'made_up': 'value'})
class TestInterfaceFor(object):
@pytest.fixture
def device_z(self, site):
return models.Device.objects.create(site=site, hostname='foo-bar2')
@pytest.fixture
def interface_a(self, device):
return models.Interface.objects.create(device=device, name='eth0')
@pytest.fixture
def interface_z(self, device_z):
return models.Interface.objects.create(
device=device_z, name='eth0')
@pytest.fixture
def normal_circuit(self, device_z, interface_a, interface_z):
return models.Circuit.objects.create(
endpoint_a=interface_a,
endpoint_z=interface_z
)
@pytest.fixture
def looped_circuit(self, device, interface_a):
interface_z = models.Interface.objects.create(
device=device,
name='eth1'
)
return models.Circuit.objects.create(
endpoint_a=interface_a,
endpoint_z=interface_z,
)
def test_normal_conditions(self, device, device_z, interface_a,
interface_z, normal_circuit):
assert normal_circuit.interface_for(device) == interface_a
print('interface_z via circuit id = {}'.format(normal_circuit.endpoint_z.id))
print('interface_z id = {}'.format(interface_z.id))
assert normal_circuit.interface_for(device_z) == interface_z
def test_single_sided(self, device, interface_a):
"""
Make sure things don't blow up on a single-sided circuit
"""
circuit = models.Circuit.objects.create(endpoint_a=interface_a)
assert circuit.interface_for(device) == interface_a
def test_looped_circuit(self, device, looped_circuit, interface_a):
"""
Test the case when both sides of a circuit are connected to the same
device. The method should return endpoint_a in this case.
"""
assert looped_circuit.interface_for(device) == interface_a
def test_bogus_device(self, device, device_z, looped_circuit):
"""
interface_for should return None when given a device that isn't
connected by the circuit
"""
assert looped_circuit.interface_for(device_z) is None
assert looped_circuit.interface_for(device) is not None
|
the-stack_0_10275 | # -*- coding: utf-8 -*-
#
# Developed by Alex Jercan <[email protected]>
#
# References:
#
import os
import torch
def tensors_to_device(tensors, device):
return (tensor.to(device, non_blocking=True) for tensor in tensors)
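# Usage sketch (illustrative): the returned generator can be unpacked directly, e.g.
#     images, depths = tensors_to_device((images, depths), device)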
def init_weights(m):
if type(m) == torch.nn.Conv2d or type(m) == torch.nn.Conv3d or \
type(m) == torch.nn.ConvTranspose2d or type(m) == torch.nn.ConvTranspose3d:
torch.nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif type(m) == torch.nn.BatchNorm2d or type(m) == torch.nn.BatchNorm3d:
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif type(m) == torch.nn.Linear:
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.constant_(m.bias, 0)
def set_parameter_requires_grad(model):
for param in model.parameters():
param.requires_grad = False
def load_checkpoint(model, checkpoint_file, device):
checkpoint = torch.load(checkpoint_file, map_location=device)
init_epoch = checkpoint['epoch_idx'] + 1
model.load_state_dict(checkpoint['state_dict'])
return init_epoch, model
def save_checkpoint(epoch_idx, model, dir_checkpoints):
file_name = 'checkpoint-epoch-%03d.pth' % (epoch_idx)
output_path = os.path.join(dir_checkpoints, file_name)
if not os.path.exists(dir_checkpoints):
os.makedirs(dir_checkpoints)
checkpoint = {
'epoch_idx': epoch_idx,
'state_dict': model.state_dict(),
}
torch.save(checkpoint, output_path) |
the-stack_0_10276 | ######################################################
# #
# SOCIALFISH v2.0 #
# #
# by: vaon4ik #
# #
# Telegram Group: https://t.me/joinchat/PMg-a1UcFlsyE___0SuKiQ #
#
# #
# #
######################################################
from contextlib import contextmanager
import json
import multiprocessing
import requests
import os
from time import sleep
from huepy import *
import subprocess
from core.email import send_mail
from core.credentials import credentials
from smtplib import SMTPSenderRefused, SMTPServerDisconnected
from time import strftime
def runPhishing(social, custom):
global _social
_social = social
os.system('rm -Rf base/Server/www/*.* && touch base/Server/www/cat.txt')
command = 'cp base/WebPages/%s/*.* base/Server/www/' % social.lower()
os.system(command)
with open('base/Server/www/login.php') as f:
read_data = f.read()
c = read_data.replace('<CUST0M>', custom)
f = open('base/Server/www/login.php', 'w')
f.write(c)
f.close()
def waitCreds():
print(cyan(" [*] Waiting for credentials... "))
while True:
with open('base/Server/www/cat.txt') as creds:
lines = creds.read().rstrip()
if len(lines) != 0:
print(green('\n [*] Credentials found:\n %s' % lines))
os.system('rm -rf base/Server/www/cat.txt && touch base/Server/www/cat.txt')
try:
credentials(lines.split('\n'), _social)
send_mail(lines.split('\n'),_social)
except NameError:
pass
except SMTPSenderRefused:
print(red(' [!] Sorry, sender refused :('))
pass
except SMTPServerDisconnected:
pass
@contextmanager
def runServer(port: int):
def php_process():
os.system("cd base/Server/www/ && php -n -S 127.0.0.1:%d > /dev/null 2>&1 &" % port)
php_process = multiprocessing.Process(target=php_process)
php_process.start()
yield php_process
php_process.terminate()
php_process.close()
@contextmanager
def ngrok_start(port: int):
ngrok_process = subprocess.Popen(
['./base/Server/ngrok','http','%s' % port],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
while True:
try:
ngrok_url = requests.get('http://127.0.0.1:4040/api/tunnels/command_line')
if ngrok_url.status_code == 200:
public_url = json.loads(ngrok_url.text)['public_url']
print(green(' [~] Ready to Phishing'))
print(lightgreen(' [*] Ngrok URL: %s' % public_url))
print(green(' [~] Your logs are being stored in: Logs/{}').format(_social + strftime('-%y%m%d.txt')))
print(yellow(' [^] Press Ctrl+C or VolDown+C(android) to quit'))
yield public_url
break
except requests.exceptions.ConnectionError:
sleep(.5)
os.kill(ngrok_process.pid, 15)
def PhishingServer(port: int=1449):
with ngrok_start(port) as ngrok:
with runServer(port) as php:
waitCreds()
|
the-stack_0_10277 | import os, itertools
import numpy as np
from ofTools.util.FileTools import save_yaml
def save_case_matrix_direct(case_list, dir_matrix):
### assumes all elements of the list are dict for that case that has the same keys!
if not os.path.exists(dir_matrix):
os.makedirs(dir_matrix)
ofh = open(os.path.join(dir_matrix,'case_matrix.txt'),'w')
case = case_list[0]
for key in case.keys():
k = key[0]
ofh.write("%s " % k)
ofh.write("\n")
for key in case.keys():
k = key[1]
ofh.write("%s " % k)
ofh.write("\n")
for i in range(len(case_list)):
case = case_list[i]
for key in case.keys():
ofh.write(str(case[key]))
ofh.write(" ")
ofh.write("\n")
ofh.close()
def save_case_matrix(matrix_out, change_vars, dir_matrix):
# save matrix file
if type(change_vars[0]) is tuple:
n_header_lines = len(change_vars[0])
else:
change_vars = [(var,) for var in change_vars]
n_header_lines = 1
n_cases = np.shape(matrix_out)[0]
matrix_out = np.hstack((np.asarray([[i] for i in range(n_cases)]), matrix_out))
change_vars = [('Case_ID',)+('',)*(n_header_lines-1)] + change_vars
# col_len = [max([len(val) for val in matrix_out[:,j]] + [len(change_vars[j][0]), len(change_vars[j][1])]) for j in range(len(change_vars))]
col_len = [max([len(str(val)) for val in matrix_out[:,j]] + [len(change_vars[j][header_i]) for header_i in range(n_header_lines)]) for j in range(len(change_vars))]
text_out = []
for header_i in range(n_header_lines):
text_out.append(''.join([val.center(col+2) for val, col in zip([var[header_i] for var in change_vars], col_len)])+'\n')
for row in matrix_out:
row_str = ''
for val, col in zip(row, col_len):
if val is not str:
val = str(val)
row_str += val.center(col+2)
row_str += '\n'
text_out.append(row_str)
if not os.path.exists(dir_matrix):
os.makedirs(dir_matrix)
ofh = open(os.path.join(dir_matrix,'case_matrix.txt'),'w')
for row in text_out:
ofh.write(row)
ofh.close()
def save_case_matrix_yaml(matrix_out, change_vars, dir_matrix, case_names):
matrix_out_yaml = {}
for var in change_vars:
matrix_out_yaml[var] = []
matrix_out_yaml['Case_ID'] = []
matrix_out_yaml['Case_Name'] = []
for i, row in enumerate(matrix_out):
matrix_out_yaml['Case_ID'].append(i)
matrix_out_yaml['Case_Name'].append(case_names[i])
for val, var in zip(row, change_vars):
if type(val) is list:
if len(val) == 1:
val = val[0]
if type(val) in [np.float32, np.float64, np.single, np.double, np.longdouble]:
val = float(val)
elif type(val) in [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.intc, np.uintc, np.uint]:
val = int(val)
elif type(val) in [np.array, np.ndarray]:
val = val.tolist()
elif type(val) in [np.str_]:
val = str(val)
# elif len(val) > 0:
# val = val.tolist()
matrix_out_yaml[var].append(val)
if not os.path.exists(dir_matrix):
os.makedirs(dir_matrix)
save_yaml(dir_matrix, 'case_matrix.yaml', matrix_out_yaml)
def case_naming(n_cases, namebase=None):
# case naming
case_name = [('%d'%i).zfill(len('%d'%(n_cases-1))) for i in range(n_cases)]
if namebase:
case_name = [namebase+'_'+caseid for caseid in case_name]
return case_name
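# For example (illustrative): case_naming(12, namebase='run') returns
# ['run_00', 'run_01', ..., 'run_11'], zero-padding indices to the width of n_cases - 1.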
def convert_str(val):
def try_type(val, data_type):
try:
data_type(val)
return True
except:
return False
# return isinstance(val, data_type) ### this doesn't work b/c of numpy data types; they're not instances of base types
def try_list(val):
try:
val[0]
return True
except:
return False
if try_type(val, int) and int(val) == float(val):
return int(val)
elif try_type(val, float):
return float(val)
elif val=='True':
return True
elif val=='False':
return False
# elif type(val)!=str and try_list(val):
# return ", ".join(['{:}'.format(i) for i in val])
else:
return val
def CaseGen_General(case_inputs, dir_matrix='', namebase='', save_matrix=True):
""" Cartesian product to enumerate over all combinations of set of variables that are changed together"""
# put case dict into lists
change_vars = sorted(case_inputs.keys())
change_vals = [case_inputs[var]['vals'] for var in change_vars]
change_group = [case_inputs[var]['group'] for var in change_vars]
# find number of groups and length of groups
group_set = list(set(change_group))
group_len = [len(change_vals[change_group.index(i)]) for i in group_set]
# case matrix, as indices
group_idx = [range(n) for n in group_len]
matrix_idx = list(itertools.product(*group_idx))
# index of each group
matrix_group_idx = [np.where([group_i == group_j for group_j in change_group])[0].tolist() for group_i in group_set]
# build final matrix of variable values
matrix_out = []
for i, row in enumerate(matrix_idx):
row_out = [None]*len(change_vars)
for j, val in enumerate(row):
for g in matrix_group_idx[j]:
row_out[g] = change_vals[g][val]
matrix_out.append(row_out)
try:
matrix_out = np.asarray(matrix_out, dtype=str)
except:
matrix_out = np.asarray(matrix_out)
n_cases = np.shape(matrix_out)[0]
# case naming
case_name = case_naming(n_cases, namebase=namebase)
# Save case matrix
if save_matrix:
if not dir_matrix:
dir_matrix = os.getcwd()
try:
save_case_matrix(matrix_out, change_vars, dir_matrix)
save_case_matrix_yaml(matrix_out, change_vars, dir_matrix, case_name)
except:
save_case_matrix_yaml(matrix_out, change_vars, dir_matrix, case_name)
case_list = []
for i in range(n_cases):
case_list_i = {}
for j, var in enumerate(change_vars):
case_list_i[var] = convert_str(matrix_out[i,j])
case_list.append(case_list_i)
return case_list, case_name
if __name__ == "__main__":
case_inputs = {}
case_inputs[("Fst","TMax")] = {'vals':[10.], 'group':0}
case_inputs[("InflowWind","WindType")] = {'vals':[1], 'group':0}
case_inputs[("InflowWind","HWindSpeed")] = {'vals':[8., 9., 10., 11., 12.], 'group':1}
case_inputs[("ElastoDyn","RotSpeed")] = {'vals':[9.156, 10.296, 11.431, 11.89, 12.1], 'group':1}
case_inputs[("ElastoDyn","BlPitch1")] = {'vals':[0., 0., 0., 0., 3.823], 'group':1}
case_inputs[("ElastoDyn","BlPitch2")] = case_inputs[("ElastoDyn","BlPitch1")]
case_inputs[("ElastoDyn","BlPitch3")] = case_inputs[("ElastoDyn","BlPitch1")]
case_inputs[("ElastoDyn","GenDOF")] = {'vals':['True','False'], 'group':2}
case_list, case_name = CaseGen_General(case_inputs, 'C:/Users/egaertne/WISDEM/AeroelasticSE/src/AeroelasticSE/', 'testing')
|
the-stack_0_10283 | import cv2
import numpy as np
def main():
x = cv2.imread("x.jpg")
y = cv2.imread("y.jpg")
print(x[300,500])
print(y[300,500])
print(x[300,500]+y[300,500])
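    # Note (added for clarity): the NumPy '+' above works on uint8 pixels and wraps around
    # on overflow (e.g. 200 + 100 -> 44), whereas cv2.add below saturates at 255.
    # cv2.addWeighted computes dst = src1*alpha + src2*beta + gamma, here 0.3*x + 0.7*y.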
toplam = cv2.add(x,y)
cv2.imshow("toplam",toplam)
agirlikli_toplam = cv2.addWeighted(x,0.3,y,0.7,0)
cv2.imshow("ağırlıklı toplam",agirlikli_toplam)
print("X FOTO\nyükseklik : {}\ngenişlik : {}\nkanal sayısı : {}\n ".format(x.shape[0],x.shape[1],x.shape[2]))
print("Y FOTO\nyükseklik : {}\ngenişlik : {}\nkanal sayısı : {}\n ".format(y.shape[0], y.shape[1], y.shape[2]))
cv2.imshow("x.jpg",x)
cv2.imshow("y.jpg",y)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
the-stack_0_10284 | """Test asyncpraw.models.user."""
from asynctest import mock
import pytest
from asyncpraw.exceptions import RedditAPIException
from asyncpraw.models import Multireddit, Redditor, Subreddit
from .. import IntegrationTest
class TestUser(IntegrationTest):
async def test_blocked(self):
self.reddit.read_only = False
with self.use_cassette():
blocked = await self.reddit.user.blocked()
assert len(blocked) > 0
assert all(isinstance(user, Redditor) for user in blocked)
async def test_contributor_subreddits(self):
self.reddit.read_only = False
with self.use_cassette():
count = 0
async for subreddit in self.reddit.user.contributor_subreddits():
assert isinstance(subreddit, Subreddit)
count += 1
assert count > 0
async def test_friends(self):
self.reddit.read_only = False
with self.use_cassette():
friends = await self.reddit.user.friends()
assert len(friends) > 0
assert all(isinstance(friend, Redditor) for friend in friends)
@mock.patch("asyncio.sleep", return_value=None)
async def test_friend_exist(self, _):
self.reddit.read_only = False
with self.use_cassette():
friend = await self.reddit.user.friends(user=await self.reddit.user.me())
assert isinstance(friend, Redditor)
@mock.patch("asyncio.sleep", return_value=None)
async def test_friend_not_exist(self, _):
self.reddit.read_only = False
with self.use_cassette():
with pytest.raises(RedditAPIException):
await self.reddit.user.friends(user="fake__user_user_user")
async def test_karma(self):
self.reddit.read_only = False
with self.use_cassette():
karma = await self.reddit.user.karma()
assert isinstance(karma, dict)
for subreddit in karma:
assert isinstance(subreddit, Subreddit)
keys = sorted(karma[subreddit].keys())
assert ["comment_karma", "link_karma"] == keys
async def test_me(self):
self.reddit.read_only = False
with self.use_cassette():
me = await self.reddit.user.me()
assert isinstance(me, Redditor)
me.praw_is_cached = True
me = await self.reddit.user.me()
assert me.praw_is_cached
@mock.patch("asyncio.sleep", return_value=None)
async def test_me__bypass_cache(self, _):
self.reddit.read_only = False
with self.use_cassette():
me = await self.reddit.user.me()
me.praw_is_cached = True
me = await self.reddit.user.me(use_cache=False)
assert not hasattr(me, "praw_is_cached")
async def test_multireddits(self):
self.reddit.read_only = False
with self.use_cassette():
multireddits = await self.reddit.user.multireddits()
assert isinstance(multireddits, list)
assert multireddits
assert all(isinstance(x, Multireddit) for x in multireddits)
async def test_subreddits(self):
self.reddit.read_only = False
with self.use_cassette():
count = 0
async for subreddit in self.reddit.user.subreddits():
assert isinstance(subreddit, Subreddit)
count += 1
assert count > 0
|
the-stack_0_10285 | import hetu as ht
import models
import os
import numpy as np
import argparse
import json
import logging
from time import time
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def print_rank0(msg):
if device_id == 0:
logger.info(msg)
if __name__ == "__main__":
# argument parser
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True,
help='model to be tested')
parser.add_argument('--dataset', type=str, required=True,
help='dataset to be trained on')
parser.add_argument('--batch-size', type=int,
default=128, help='batch size')
parser.add_argument('--learning-rate', type=float,
default=0.1, help='learning rate')
parser.add_argument('--opt', type=str, default='sgd',
help='optimizer to be used, default sgd; sgd / momentum / adagrad / adam')
parser.add_argument('--num-epochs', type=int,
default=10, help='epoch number')
parser.add_argument('--gpu', type=int, default=0,
help='gpu to be used, -1 means cpu')
parser.add_argument('--validate', action='store_true',
help='whether to use validation')
parser.add_argument('--timing', action='store_true',
help='whether to time the training phase')
parser.add_argument('--comm-mode', default=None, help='communication mode')
args = parser.parse_args()
global device_id
device_id = 0
print_rank0("Training {} on HETU".format(args.model))
if args.comm_mode in ('AllReduce', 'Hybrid'):
comm, device_id = ht.mpi_nccl_init()
executor_ctx = ht.gpu(device_id % 8) if args.gpu >= 0 else ht.cpu(0)
else:
if args.gpu == -1:
executor_ctx = ht.cpu(0)
print_rank0('Use CPU.')
else:
executor_ctx = ht.gpu(args.gpu)
print_rank0('Use GPU %d.' % args.gpu)
if args.comm_mode in ('PS', 'Hybrid'):
settings_file = open(os.path.join(os.path.abspath(
os.path.dirname(__file__)), 'worker_conf%d.json' % args.gpu))
settings = json.load(settings_file)
for key in settings:
if type(settings[key]) == str:
os.environ[key] = settings[key]
else:
os.environ[key] = str(settings[key]) # type is str
assert args.model in ['alexnet', 'cnn_3_layers', 'lenet', 'logreg', 'lstm', 'mlp', 'resnet18', 'resnet34', 'rnn', 'vgg16', 'vgg19'], \
'Model not supported!'
model = eval('models.' + args.model)
assert args.dataset in ['MNIST', 'CIFAR10', 'CIFAR100', 'ImageNet']
dataset = args.dataset
assert args.opt in ['sgd', 'momentum', 'nesterov',
'adagrad', 'adam'], 'Optimizer not supported!'
if args.opt == 'sgd':
print_rank0('Use SGD Optimizer.')
opt = ht.optim.SGDOptimizer(learning_rate=args.learning_rate)
elif args.opt == 'momentum':
print_rank0('Use Momentum Optimizer.')
opt = ht.optim.MomentumOptimizer(learning_rate=args.learning_rate)
elif args.opt == 'nesterov':
print_rank0('Use Nesterov Momentum Optimizer.')
opt = ht.optim.MomentumOptimizer(
learning_rate=args.learning_rate, nesterov=True)
elif args.opt == 'adagrad':
print_rank0('Use AdaGrad Optimizer.')
opt = ht.optim.AdaGradOptimizer(
learning_rate=args.learning_rate, initial_accumulator_value=0.1)
else:
print_rank0('Use Adam Optimizer.')
opt = ht.optim.AdamOptimizer(learning_rate=args.learning_rate)
# data loading
print_rank0('Loading %s data...' % dataset)
if dataset == 'MNIST':
datasets = ht.data.mnist()
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# train_set_x: (50000, 784), train_set_y: (50000, 10)
# valid_set_x: (10000, 784), valid_set_y: (10000, 10)
# x_shape = (args.batch_size, 784)
# y_shape = (args.batch_size, 10)
elif dataset == 'CIFAR10':
train_set_x, train_set_y, valid_set_x, valid_set_y = ht.data.normalize_cifar(
num_class=10)
if args.model == "mlp":
train_set_x = train_set_x.reshape(train_set_x.shape[0], -1)
valid_set_x = valid_set_x.reshape(valid_set_x.shape[0], -1)
# train_set_x: (50000, 3, 32, 32), train_set_y: (50000, 10)
# valid_set_x: (10000, 3, 32, 32), valid_set_y: (10000, 10)
# x_shape = (args.batch_size, 3, 32, 32)
# y_shape = (args.batch_size, 10)
elif dataset == 'CIFAR100':
train_set_x, train_set_y, valid_set_x, valid_set_y = ht.data.normalize_cifar(
num_class=100)
# train_set_x: (50000, 3, 32, 32), train_set_y: (50000, 100)
# valid_set_x: (10000, 3, 32, 32), valid_set_y: (10000, 100)
else:
raise NotImplementedError
# model definition
print_rank0('Building model {}'.format(args.model))
x = ht.dataloader_op([
ht.Dataloader(train_set_x, args.batch_size, 'train'),
ht.Dataloader(valid_set_x, args.batch_size, 'validate'),
])
y_ = ht.dataloader_op([
ht.Dataloader(train_set_y, args.batch_size, 'train'),
ht.Dataloader(valid_set_y, args.batch_size, 'validate'),
])
if args.model in ['resnet18', 'resnet34', 'vgg16', 'vgg19'] and args.dataset == 'CIFAR100':
loss, y = model(x, y_, 100)
else:
loss, y = model(x, y_)
train_op = opt.minimize(loss)
eval_nodes = {'train': [loss, y, y_, train_op], 'validate': [loss, y, y_]}
executor = ht.Executor(eval_nodes, ctx=executor_ctx,
comm_mode=args.comm_mode)
n_train_batches = executor.get_batch_num('train')
n_valid_batches = executor.get_batch_num('validate')
# training
print_rank0("Start training loop...")
running_time = 0
for i in range(args.num_epochs + 1):
print_rank0("Epoch %d" % i)
loss_all = 0
batch_num = 0
if args.timing:
start = time()
correct_predictions = []
for minibatch_index in range(n_train_batches):
loss_val, predict_y, y_val, _ = executor.run(
'train', eval_node_list=[loss, y, y_, train_op])
# Loss for this minibatch
predict_y = predict_y.asnumpy()
y_val = y_val.asnumpy()
loss_all += loss_val.asnumpy()
batch_num += 1
# Predict accuracy for this minibatch
correct_prediction = np.equal(
np.argmax(y_val, 1),
np.argmax(predict_y, 1)).astype(np.float32)
correct_predictions.extend(correct_prediction)
loss_all /= batch_num
accuracy = np.mean(correct_predictions)
print_rank0("Train loss = %f" % loss_all)
print_rank0("Train accuracy = %f" % accuracy)
if args.timing:
end = time()
during_time = end - start
print_rank0("Running time of current epoch = %fs" % (during_time))
if i != 0:
running_time += during_time
if args.validate:
val_loss_all = 0
batch_num = 0
correct_predictions = []
for minibatch_index in range(n_valid_batches):
loss_val, valid_y_predicted, y_val = executor.run(
'validate', eval_node_list=[loss, y, y_], convert_to_numpy_ret_vals=True)
val_loss_all += loss_val
batch_num += 1
correct_prediction = np.equal(
np.argmax(y_val, 1),
np.argmax(valid_y_predicted, 1)).astype(np.float32)
correct_predictions.extend(correct_prediction)
val_loss_all /= batch_num
accuracy = np.mean(correct_predictions)
print_rank0("Validation loss = %f" % val_loss_all)
print_rank0("Validation accuracy = %f" % accuracy)
print_rank0("*"*50)
print_rank0("Running time of total %d epoch = %fs" %
(args.num_epochs, running_time))
if args.comm_mode in ('AllReduce', 'Hybrid'):
ht.mpi_nccl_finish(comm)
|
the-stack_0_10287 | #!/usr/bin/env python
# http://pyode.sourceforge.net/tutorials/tutorial2.html
# pyODE example 2: Connecting bodies with joints
# modified by Gideon Klompje (removed literals and using
# 'ode.Mass.setSphereTotal' instead of 'ode.Mass.setSphere')
import ode
import pygame
from pygame.locals import QUIT, KEYDOWN
# Constants
WINDOW_RESOLUTION = (640, 480)
DRAW_SCALE = WINDOW_RESOLUTION[0] / 5
"""Factor to multiply physical coordinates by to obtain screen size in pixels"""
DRAW_OFFSET = (WINDOW_RESOLUTION[0] / 2, 50)
"""Screen coordinates (in pixels) that map to the physical origin (0, 0, 0)"""
BACKGROUND_COLOR = (255, 255, 255)
GRAVITY = (0, -9.81, 0)
SPHERE1_POSITION = (1, 0, 0)
SPHERE1_MASS = 1
SPHERE1_RADIUS = 0.15
SPHERE1_COLOR = (55, 0, 200)
SPHERE2_POSITION = (2, 0, 0)
SPHERE2_MASS = 1
SPHERE2_RADIUS = 0.15
SPHERE2_COLOR = (55, 0, 200)
JOINT1_ANCHOR = (0, 0, 0)
JOINT1_COLOR = (200, 0, 55)
JOINT1_WIDTH = 2
"""Width of the line (in pixels) representing the joint"""
JOINT2_ANCHOR = SPHERE1_POSITION
JOINT2_COLOR = (200, 0, 55)
JOINT2_WIDTH = 2
"""Width of the line (in pixels) representing the joint"""
TIME_STEP = 0.04
# Utility functions
def coord(x, y, integer=False):
"""
Convert world coordinates to pixel coordinates. Setting 'integer' to
True will return integer coordinates.
"""
xs = (DRAW_OFFSET[0] + DRAW_SCALE*x)
ys = (DRAW_OFFSET[1] - DRAW_SCALE*y)
if integer:
return int(round(xs)), int(round(ys))
else:
return xs, ys
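# Worked example (derived from the constants above, added here for clarity):
# with DRAW_SCALE = 128 and DRAW_OFFSET = (320, 50), the world point (1, 0)
# maps to pixel (448, 50) and (0, -1) maps to (320, 178), since the y axis
# is flipped between world and screen coordinates.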
# Initialize pygame
pygame.init()
# Open a display
screen = pygame.display.set_mode(WINDOW_RESOLUTION)
# Create a world object
world = ode.World()
world.setGravity(GRAVITY)
# Create two bodies
body1 = ode.Body(world)
M = ode.Mass()
M.setSphereTotal(SPHERE1_MASS, SPHERE1_RADIUS)
body1.setMass(M)
body1.setPosition(SPHERE1_POSITION)
body2 = ode.Body(world)
M = ode.Mass()
M.setSphereTotal(SPHERE2_MASS, SPHERE2_RADIUS)
body2.setMass(M)
body2.setPosition(SPHERE2_POSITION)
# Connect body1 with the static environment
j1 = ode.BallJoint(world)
j1.attach(body1, ode.environment)
j1.setAnchor(JOINT1_ANCHOR)
# Connect body2 with body1
j2 = ode.BallJoint(world)
j2.attach(body1, body2)
j2.setAnchor(JOINT2_ANCHOR)
# Simulation loop...
if __name__ == "__main__":
fps = 1.0 / TIME_STEP
clk = pygame.time.Clock()
sph1_rad = int(DRAW_SCALE * SPHERE1_RADIUS)
sph2_rad = int(DRAW_SCALE * SPHERE2_RADIUS)
loopFlag = True
while loopFlag:
for e in pygame.event.get():
if e.type==QUIT:
loopFlag=False
if e.type==KEYDOWN:
loopFlag=False
# Clear the screen
screen.fill(BACKGROUND_COLOR)
# Draw the two bodies and the lines representing the joints
x1, y1, z1 = body1.getPosition()
x2, y2, z2 = body2.getPosition()
xj1, yj1, zj1 = j1.getAnchor()
xj2, yj2, zj2 = j2.getAnchor()
pygame.draw.line(screen, JOINT1_COLOR, coord(xj1, yj1), coord(x1, y1), JOINT1_WIDTH)
pygame.draw.line(screen, JOINT2_COLOR, coord(xj2, yj2), coord(x2, y2), JOINT2_WIDTH)
pygame.draw.circle(screen, SPHERE1_COLOR, coord(x1, y1, integer=True), sph1_rad, 0)
pygame.draw.circle(screen, SPHERE2_COLOR, coord(x2, y2, integer=True), sph2_rad, 0)
pygame.display.flip()
# Next simulation step
world.step(TIME_STEP)
# Try to keep the specified framerate
clk.tick(fps)
|
the-stack_0_10288 | """
Inductive Representation Learning on Large Graphs
Paper: http://papers.nips.cc/paper/6703-inductive-representation-learning-on-large-graphs.pdf
Code: https://github.com/williamleif/graphsage-simple
Simple reference implementation of GraphSAGE.
"""
import argparse
import time
import abc
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import DGLGraph
from dgl.data import register_data_args, load_data
from dgl.nn.pytorch.conv import SAGEConv
class GraphSAGE(nn.Module):
def __init__(self,
g,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout,
aggregator_type):
super(GraphSAGE, self).__init__()
self.layers = nn.ModuleList()
self.g = g
# input layer
self.layers.append(SAGEConv(in_feats, n_hidden, aggregator_type, feat_drop=dropout, activation=activation))
# hidden layers
for i in range(n_layers - 1):
self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator_type, feat_drop=dropout, activation=activation))
# output layer
self.layers.append(SAGEConv(n_hidden, n_classes, aggregator_type, feat_drop=dropout, activation=None)) # activation None
def forward(self, features):
h = features
for layer in self.layers:
h = layer(self.g, h)
return h
def evaluate(model, features, labels, mask):
model.eval()
with torch.no_grad():
logits = model(features)
logits = logits[mask]
labels = labels[mask]
_, indices = torch.max(logits, dim=1)
correct = torch.sum(indices == labels)
return correct.item() * 1.0 / len(labels)
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print("""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d""" %
(n_edges, n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item()))
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
print("use cuda:", args.gpu)
# graph preprocess and calculate normalization factor
g = data.graph
g.remove_edges_from(g.selfloop_edges())
g = DGLGraph(g)
n_edges = g.number_of_edges()
# create GraphSAGE model
model = GraphSAGE(g,
in_feats,
args.n_hidden,
n_classes,
args.n_layers,
F.relu,
args.dropout,
args.aggregator_type
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(epoch, np.mean(dur), loss.item(),
acc, n_edges / np.mean(dur) / 1000))
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GraphSAGE')
register_data_args(parser)
parser.add_argument("--dropout", type=float, default=0.5,
help="dropout probability")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--n-epochs", type=int, default=200,
help="number of training epochs")
parser.add_argument("--n-hidden", type=int, default=16,
help="number of hidden gcn units")
parser.add_argument("--n-layers", type=int, default=1,
help="number of hidden gcn layers")
parser.add_argument("--weight-decay", type=float, default=5e-4,
help="Weight for L2 loss")
parser.add_argument("--aggregator-type", type=str, default="gcn",
help="Aggregator type: mean/gcn/pool/lstm")
args = parser.parse_args()
print(args)
main(args)
|
the-stack_0_10289 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for multivariate von Mises-Fisher distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions.von_mises_fisher import _bessel_ive
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
@test_util.run_all_in_graph_and_eager_modes
class VonMisesFisherTest(tfp_test_util.VectorDistributionTestHelpers,
tf.test.TestCase):
def testBesselIve(self):
self.assertRaises(ValueError, lambda: _bessel_ive(2.0, 1.0))
# Zero is not a supported value for z.
self.assertRaises(tf.errors.InvalidArgumentError,
lambda: self.evaluate(_bessel_ive(1.5, 0.0)))
z = np.logspace(-6, 2, 20).astype(np.float64)
for v in np.float64([-0.5, 0, 0.5, 1, 1.5]):
try:
from scipy import special # pylint:disable=g-import-not-at-top
except ImportError:
tf.compat.v1.logging.warn('Skipping scipy-dependent tests')
return
self.assertAllClose(special.ive(v, z), _bessel_ive(v, z))
def testSampleMeanDir2d(self):
mean_dirs = tf.nn.l2_normalize([[1., 1],
[-2, 1],
[0, -1]], axis=-1)
concentration = [[0], [0.1], [2], [40], [1000]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dirs,
concentration=concentration,
validate_args=True,
allow_nan_stats=False)
self.assertEqual([5, 3], vmf.batch_shape.as_list())
self.assertEqual([2], vmf.event_shape.as_list())
nsamples = 12000
samples = vmf.sample(sample_shape=[nsamples])
self.assertEqual([nsamples, 5, 3, 2], samples.shape.as_list())
sample_mean = self.evaluate(samples).mean(axis=0)
# Assert that positive-concentration distributions have samples with
# the expected mean direction.
sample_dir = (
sample_mean / np.linalg.norm(sample_mean, axis=-1, keepdims=True))
inner_product = self.evaluate(
tf.reduce_sum(input_tensor=sample_dir * vmf.mean_direction, axis=-1))
# All except the 0-concentration distribution should have >0 inner product
# with the mean direction of the distribution.
self.assertAllGreater(inner_product[1:], 0.1)
# Pick out >1 concentration distributions to assert ~1 inner product with
# mean direction.
self.assertAllClose(np.ones_like(inner_product)[2:], inner_product[2:],
atol=1e-3)
# Inner products should be roughly ascending by concentration.
self.assertAllEqual(np.round(np.sort(inner_product, axis=0), decimals=3),
np.round(inner_product, decimals=3))
means = self.evaluate(vmf.mean())
# Mean vector for 0-concentration is precisely (0, 0).
self.assertAllEqual(np.zeros_like(means[0]), means[0])
mean_lengths = np.linalg.norm(means, axis=-1)
# Length of the mean vector is strictly ascending with concentration.
self.assertAllEqual(mean_lengths, np.sort(mean_lengths, axis=0))
self.assertAllClose(np.linalg.norm(sample_mean, axis=-1), mean_lengths,
atol=0.02)
def testSampleMeanDir3d(self):
mean_dir = tf.nn.l2_normalize([[1., 2, 3],
[-2, -3, -1]], axis=-1)
concentration = [[0], [0.1], [2], [40], [1000]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir,
concentration=concentration,
validate_args=True,
allow_nan_stats=False)
self.assertEqual([5, 2], vmf.batch_shape.as_list())
self.assertEqual([3], vmf.event_shape.as_list())
nsamples = int(2e4)
samples = vmf.sample(sample_shape=[nsamples])
self.assertEqual([nsamples, 5, 2, 3], samples.shape.as_list())
sample_mean = self.evaluate(samples).mean(axis=0)
# Assert that positive-concentration distributions have samples with
# the expected mean direction.
sample_dir = (
sample_mean / np.linalg.norm(sample_mean, axis=-1, keepdims=True))
inner_product = self.evaluate(
tf.reduce_sum(input_tensor=sample_dir * vmf.mean_direction, axis=-1))
# All except the 0-concentration distribution should have >0 inner product
# with the mean direction of the distribution.
self.assertAllGreater(inner_product[1:], 0.1)
# Pick out >1 concentration distributions to assert ~1 inner product with
# mean direction.
self.assertAllClose(np.ones_like(inner_product)[2:], inner_product[2:],
atol=1e-3)
# Inner products should be roughly ascending by concentration.
self.assertAllEqual(np.round(np.sort(inner_product, axis=0), decimals=3),
np.round(inner_product, decimals=3))
means = self.evaluate(vmf.mean())
# Mean vector for 0-concentration is precisely (0, 0, 0).
self.assertAllEqual(np.zeros_like(means[0]), means[0])
mean_lengths = np.linalg.norm(means, axis=-1)
# Length of the mean vector is strictly ascending with concentration.
self.assertAllEqual(mean_lengths, np.sort(mean_lengths, axis=0))
self.assertAllClose(np.linalg.norm(sample_mean, axis=-1), mean_lengths,
atol=0.02)
def _verifyPdfWithNumpy(self, vmf, atol=1e-4):
"""Verifies log_prob evaluations with numpy/scipy.
Both uniform random points and sampled points are evaluated.
Args:
vmf: A `tfp.distributions.VonMisesFisher` instance.
atol: Absolute difference tolerable.
"""
dim = tf.compat.dimension_value(vmf.event_shape[-1])
nsamples = 10
# Sample some random points uniformly over the hypersphere using numpy.
sample_shape = [nsamples] + vmf.batch_shape.as_list() + [dim]
uniforms = np.random.randn(*sample_shape)
uniforms /= np.linalg.norm(uniforms, axis=-1, keepdims=True)
uniforms = uniforms.astype(vmf.dtype.as_numpy_dtype)
# Concatenate in some sampled points from the distribution under test.
samples = tf.concat([uniforms, vmf.sample(sample_shape=[nsamples])], axis=0)
samples = tf.debugging.check_numerics(samples, 'samples')
samples = self.evaluate(samples)
log_prob = vmf.log_prob(samples)
log_prob = tf.debugging.check_numerics(log_prob, 'log_prob')
try:
from scipy.special import gammaln # pylint: disable=g-import-not-at-top
from scipy.special import ive # pylint: disable=g-import-not-at-top
except ImportError:
tf.compat.v1.logging.warn('Unable to use scipy in tests')
return
conc = self.evaluate(vmf.concentration)
mean_dir = self.evaluate(vmf.mean_direction)
log_true_sphere_surface_area = (
np.log(2) + (dim / 2) * np.log(np.pi) - gammaln(dim / 2))
expected = (
conc * np.sum(samples * mean_dir, axis=-1) +
np.where(conc > 0,
(dim / 2 - 1) * np.log(conc) -
(dim / 2) * np.log(2 * np.pi) -
np.log(ive(dim / 2 - 1, conc)) -
np.abs(conc),
-log_true_sphere_surface_area))
self.assertAllClose(expected, self.evaluate(log_prob),
atol=atol)
def _verifySampleAndPdfConsistency(self, vmf, rtol=0.075):
"""Verifies samples are consistent with the PDF using importance sampling.
In particular, we verify an estimate of the surface area of the n-dimensional
hypersphere, and the surface areas of the spherical caps demarcated by
a handful of survival rates.
Args:
vmf: A `VonMisesFisher` distribution instance.
rtol: Relative difference tolerable.
"""
dim = tf.compat.dimension_value(vmf.event_shape[-1])
nsamples = 50000
samples = vmf.sample(sample_shape=[nsamples])
samples = tf.debugging.check_numerics(samples, 'samples')
log_prob = vmf.log_prob(samples)
log_prob = tf.debugging.check_numerics(log_prob, 'log_prob')
log_importance = -log_prob
sphere_surface_area_estimate, samples, importance, conc = self.evaluate([
tf.exp(
tf.reduce_logsumexp(input_tensor=log_importance, axis=0) -
tf.math.log(tf.cast(nsamples, dtype=tf.float32))), samples,
tf.exp(log_importance), vmf.concentration
])
true_sphere_surface_area = 2 * (np.pi)**(dim / 2) * self.evaluate(
tf.exp(-tf.math.lgamma(dim / 2)))
# Broadcast to correct size
true_sphere_surface_area += np.zeros_like(sphere_surface_area_estimate)
# Highly concentrated distributions do not get enough coverage to provide
# a reasonable full-sphere surface area estimate. These are covered below
# by CDF-based hypersphere cap surface area estimates.
self.assertAllClose(
true_sphere_surface_area[np.where(conc < 3)],
sphere_surface_area_estimate[np.where(conc < 3)],
rtol=rtol)
# Assert the surface area of the hyperspherical cap for some CDFs in [.05, .45]
# (h must be greater than 0 for the hypersphere cap surface area
# calculation to hold).
for survival_rate in 0.95, .9, .75, .6:
cdf = (1 - survival_rate)
mean_dir = self.evaluate(vmf.mean_direction)
dotprods = np.sum(samples * mean_dir, -1)
# Empirical estimate of the effective dot-product of the threshold that
# selects for a given CDF level, that is the cosine of the largest
# passable angle, or the minimum cosine for a within-CDF sample.
dotprod_thresh = np.percentile(
dotprods, 100 * survival_rate, axis=0, keepdims=True)
dotprod_above_thresh = np.float32(dotprods > dotprod_thresh)
sphere_cap_surface_area_ests = (
cdf * (importance * dotprod_above_thresh).sum(0) /
dotprod_above_thresh.sum(0))
h = (1 - dotprod_thresh)
self.assertGreaterEqual(h.min(), 0) # h must be >= 0 for the eqn below
true_sphere_cap_surface_area = (
0.5 * true_sphere_surface_area *
self.evaluate(tf.math.betainc((dim - 1) / 2, 0.5, 2 * h - h**2)))
if dim == 3: # For 3-d we have a simpler form we can double-check.
self.assertAllClose(2 * np.pi * h, true_sphere_cap_surface_area)
self.assertAllClose(
true_sphere_cap_surface_area,
sphere_cap_surface_area_ests +
np.zeros_like(true_sphere_cap_surface_area),
rtol=rtol)
def _verifyCovariance(self, vmf):
dim = tf.compat.dimension_value(vmf.event_shape[-1])
nsamples = 10000
samples = vmf.sample(nsamples)
samples = tf.debugging.check_numerics(samples, 'samples')
cov = vmf.covariance()
samples, cov = self.evaluate([samples, cov])
batched_samples = np.reshape(samples, [nsamples, -1, dim])
batch_size = batched_samples.shape[1]
est_cov = np.zeros([batch_size, dim, dim], dtype=cov.dtype)
for bi in range(batched_samples.shape[1]):
est_cov[bi] = np.cov(batched_samples[:, bi], rowvar=False)
self.assertAllClose(
np.reshape(est_cov, cov.shape),
cov,
atol=0.015)
def testSampleAndPdfConsistency2d(self):
mean_dir = tf.nn.l2_normalize([[1., 2],
[-2, -3]], axis=-1)
concentration = [[0], [1e-5], [0.1], [1], [10]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir, concentration=concentration,
validate_args=True, allow_nan_stats=False)
self._verifySampleAndPdfConsistency(vmf)
self._verifyCovariance(vmf)
self._verifyPdfWithNumpy(vmf)
def testSampleAndPdfConsistency3d(self):
mean_dir = tf.nn.l2_normalize([[1., 2, 3],
[-2, -3, -1]], axis=-1)
concentration = [[0], [1e-5], [0.1], [1], [10]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir, concentration=concentration,
validate_args=True, allow_nan_stats=False)
self._verifySampleAndPdfConsistency(vmf)
# TODO(bjp): Enable self._verifyCovariance(vmf)
self._verifyPdfWithNumpy(vmf, atol=.002)
def testSampleAndPdfConsistency4d(self):
mean_dir = tf.nn.l2_normalize([[1., 2, 3, 4],
[-2, -3, -1, 0]], axis=-1)
concentration = [[0], [1e-4], [0.1], [1], [10]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir, concentration=concentration,
validate_args=True, allow_nan_stats=False)
self._verifySampleAndPdfConsistency(vmf)
# TODO(bjp): Enable self._verifyCovariance(vmf)
self._verifyPdfWithNumpy(vmf)
def testSampleAndPdfConsistency5d(self):
mean_dir = tf.nn.l2_normalize([[1., 2, 3, 4, 5],
[-2, -3, -1, 0, 1]], axis=-1)
# TODO(bjp): Numerical instability for 0 < k < 1e-2 concentrations.
# Should resolve by eliminating the bessel_i recurrence in favor of
# a more stable algorithm, e.g. cephes.
concentration = [[0], [5e-2], [0.1], [1], [10]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir, concentration=concentration,
validate_args=True, allow_nan_stats=False)
self._verifySampleAndPdfConsistency(vmf)
# TODO(bjp): Enable self._verifyCovariance(vmf)
self._verifyPdfWithNumpy(vmf)
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_10290 | """
scanner
scan the COVID-19 government sites
data is fetched and cleaned then pushed to a git repo
files are only updated if the cleaned version changes
"""
from argparse import ArgumentParser, Namespace, RawDescriptionHelpFormatter
import configparser
import sys
import os
from datetime import datetime, timezone, timedelta
import time
from loguru import logger
from typing import List, Dict, Tuple
from data_pipeline import DataPipeline, DataPipelineConfig
from specialized_capture import SpecializedCapture, special_cases
from util import get_host
import udatetime
import util_git
# ----------------------
parser = ArgumentParser(
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument(
'-f', '--format', dest='format_html', action='store_true', default=False,
help='run the html formater (only)')
parser.add_argument(
'-c', '--clean', dest='clean_html', action='store_true', default=False,
help='run the html cleaner (only)')
parser.add_argument(
'-x', '--extract', dest='extract_html', action='store_true', default=False,
help='run the html extractor (only)')
parser.add_argument('--trace', dest='trace', action='store_true', default=False,
help='turn on tracing')
parser.add_argument('-a', '--auto_push', dest='auto_push', action='store_true', default=False,
help='checkin data to the git repo at end of run')
parser.add_argument('--rerun_now', dest='rerun_now', action='store_true', default=False,
help='include items that were fetched in the last 15 minutes')
parser.add_argument('--continuous', dest='continuous', action='store_true', default=False,
help='Run at 0:05 and 0:35')
parser.add_argument('--auto_update', dest='auto_update', action='store_true', default=False,
help='Pull changes and restart if source has changed')
parser.add_argument('--guarded', dest='guarded', action='store_true', default=False)
parser.add_argument('--firefox', dest='use_firefox', action='store_true', default=False,
help='capture using firefox')
parser.add_argument('--chrome', dest='use_chrome', action='store_true', default=False,
help='capture using chrome')
parser.add_argument('--show_browser', dest='show_browser', action='store_true', default=False,
help='show browser while running')
parser.add_argument('-i', '--image', dest='capture_image', action='store_true', default=False,
help='capture image after each change')
# data dir args
config = configparser.ConfigParser()
if os.path.exists("data_pipeline.local.ini"):
config.read('data_pipeline.local.ini')
elif os.path.exists("data_pipeline.ini"):
config.read('data_pipeline.ini')
else:
raise Exception("Missing data_pipeline.ini file")
parser.add_argument(
'--base_dir',
default=config["DIRS"]["base_dir"],
help='Local GitHub repo dir for corona19-data-archive')
parser.add_argument(
'--temp_dir',
default=config["DIRS"]["temp_dir"],
help='Local temp dir for snapshots')
# ----
def next_time() -> datetime:
t = datetime.now()
xmin = t.minute
if xmin < 25:
xmin = 35
elif xmin < 55:
t = t + timedelta(hours=1)
xmin = 5
else:
t = t + timedelta(hours=1)
xmin = 35
t = datetime(t.year, t.month, t.day, t.hour, xmin, 0)
return t
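# Illustrative behaviour of next_time() (examples added for clarity, not in the
# original source): a call at 10:17 returns 10:35 the same day, a call at 10:40
# returns 11:05, and a call at 10:57 returns 11:35, so runs land on the
# half-hourly :05/:35 schedule mentioned in the --continuous help text.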
def init_specialized_capture(args: Namespace) -> SpecializedCapture:
temp_dir = args.temp_dir
publish_dir = os.path.join(args.base_dir, "captive-browser")
capture = SpecializedCapture(temp_dir, publish_dir)
return capture
def run_continuous(scanner: DataPipeline, capture: SpecializedCapture, auto_push: bool):
if util_git.monitor_check(): return
host = get_host()
try:
print("starting continuous run")
scanner.update_sources()
scanner.process()
if capture:
try:
special_cases(capture)
except Exception as ex:
logger.error(ex)
logger.error("*** continue after exception in specialized capture")
if auto_push: util_git.push(scanner.config.base_dir, f"{udatetime.to_logformat(scanner.change_list.start_date)} on {host}")
if util_git.monitor_check(): return
cnt = 1
t = next_time()
print(f"sleep until {t}")
while True:
time.sleep(15)
if datetime.now() < t: continue
if util_git.monitor_check(): break
print("==================================")
print(f"=== run {cnt} at {t}")
print("==================================")
try:
scanner.update_sources()
scanner.process()
if capture: special_cases(capture)
if auto_push: util_git.push(scanner.config.base_dir, f"{udatetime.to_displayformat(scanner.change_list.start_date)} on {host}")
except Exception as ex:
logger.exception(ex)
print(f"run failed, wait 5 minutes and try again")
t = t + timedelta(minutes=5)
print("==================================")
print("")
t = next_time()
print(f"sleep until {t}")
cnt += 1
finally:
if capture: capture.close()
def run_once(scanner: DataPipeline, auto_push: bool):
scanner.update_sources()
scanner.process()
if auto_push:
host = get_host()
util_git.push(scanner.config.base_dir, f"{udatetime.to_logformat(scanner.change_list.start_date)} on {host}")
def main(args_list=None):
if args_list is None:
args_list = sys.argv[1:]
args = parser.parse_args(args_list)
if args.auto_update:
return util_git.monitor_start("--auto_update")
if not args.auto_push:
logger.warning("github push is DISABLED")
config = DataPipelineConfig(args.base_dir, args.temp_dir, flags = {
"trace": args.trace,
"capture_image": args.capture_image,
"rerun_now": args.rerun_now,
"firefox": args.use_firefox,
"chrome": args.use_chrome,
"headless": not args.show_browser,
})
scanner = DataPipeline(config)
capture = init_specialized_capture(args)
if args.clean_html or args.extract_html or args.format_html:
if args.format_html: scanner.format_html(rerun=True)
if args.clean_html: scanner.clean_html(rerun=True)
if args.extract_html: scanner.extract_html(rerun=True)
elif args.continuous:
scanner.format_html()
scanner.clean_html()
scanner.extract_html()
run_continuous(scanner, capture, auto_push = args.auto_push)
else:
scanner.format_html()
scanner.clean_html()
scanner.extract_html()
run_once(scanner, args.auto_push)
if __name__ == "__main__":
main()
|
the-stack_0_10291 | import numpy as np
from numpy.testing.utils import assert_equal
from brian2.synapses.spikequeue import SpikeQueue
from brian2.units.stdunits import ms
from brian2.memory.dynamicarray import DynamicArray1D
def create_all_to_all(N):
'''
Return a tuple containing `synapses` and `delays` in the form that is needed
for the `SpikeQueue` initializer.
Every synapse has a delay depending on the presynaptic neuron.
'''
data = np.repeat(np.arange(N), N)
delays = DynamicArray1D(data.shape, dtype=np.int32)
delays[:] = data
synapses = [DynamicArray1D(N, dtype=np.int32) for _ in xrange(N)]
for i in xrange(N):
synapses[i][:] = np.arange(N) + i*N
return synapses, delays
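# Small worked example (added for illustration): for N = 2, create_all_to_all
# returns delays [0, 0, 1, 1] and synapses [[0, 1], [2, 3]], i.e. both synapses
# of presynaptic neuron i share delay i and get the global indices i*N .. i*N + N - 1.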
def create_one_to_one(N):
'''
Return a tuple containing `synapses` and `delays` in the form that is needed
for the `SpikeQueue` initializer.
Every synapse has a delay depending on the presynaptic neuron.
'''
data = np.arange(N)
delays = DynamicArray1D(data.shape, dtype=np.int32)
delays[:] = data
data = np.arange(N)
synapses = [DynamicArray1D(1, dtype=np.int32) for _ in xrange(N)]
for i in xrange(N):
synapses[i][:] = i
return synapses, delays
def test_spikequeue():
N = 100
synapses, delays = create_one_to_one(N)
queue = SpikeQueue()
queue.compress(delays, synapses, N)
queue.push(np.arange(N, dtype=np.int32), delays)
for i in xrange(N):
assert_equal(queue.peek(), np.array([i]))
queue.next()
for i in xrange(N):
assert_equal(queue.peek(), np.array([]))
queue.next()
synapses, delays = create_all_to_all(N)
queue = SpikeQueue()
queue.compress(delays, synapses, N*N)
queue.push(np.arange(N*N, dtype=np.int32), delays)
for i in xrange(N):
assert_equal(queue.peek(), i*N + np.arange(N))
queue.next()
for i in xrange(N):
assert_equal(queue.peek(), np.array([]))
queue.next()
if __name__ == '__main__':
test_spikequeue()
|
the-stack_0_10292 | # The MIT License (MIT)
#
# Copyright (c) 2019 Brent Rubell for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_ntp`
================================================================================
Network Time Protocol (NTP) helper for CircuitPython
* Author(s): Brent Rubell
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import time
import rtc
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_NTP.git"
class NTP:
"""Network Time Protocol (NTP) helper module for CircuitPython.
This module does not handle daylight savings or local time.
:param adafruit_esp32spi esp: ESP32SPI object.
"""
def __init__(self, esp):
# Verify ESP32SPI module
if "ESP_SPIcontrol" in str(type(esp)):
self._esp = esp
else:
raise TypeError("Provided object is not an ESP_SPIcontrol object.")
self.valid_time = False
def set_time(self, tz_offset=0):
"""Fetches and sets the microcontroller's current time
in seconds since Jan 1, 1970.
:param int tz_offset: Timezone offset from GMT
"""
try:
now = self._esp.get_time()
now = time.localtime(now[0] + (tz_offset * 3600)) # 3600 seconds in an hour
rtc.RTC().datetime = now
self.valid_time = True
except ValueError as error:
print(str(error))
return
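# A minimal usage sketch (not part of the library; assumes `esp` is an already
# connected adafruit_esp32spi ESP_SPIcontrol object and that `time` is imported):
#
#   ntp = NTP(esp)
#   while not ntp.valid_time:
#       ntp.set_time(tz_offset=-5)  # retry until the ESP32 returns a valid time
#       time.sleep(1)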
|
the-stack_0_10293 | from random import randint
import sys
RANDOM_NUMS = []
class Assign:
def assign(self, number, some_member, member_list):
for item in member_list:
if number == item.assignee:
continue
some_member.assignee = number
break
def assign_nums(self, member_list):
for member in member_list:
count = 0
random_num = randint(0, len(member_list) - 1)
while random_num in RANDOM_NUMS or random_num == member_list.index(member):
random_num = randint(0, len(member_list) - 1)
if count == 3:
print("Loop failed, try again!")
sys.exit()
count += 1
RANDOM_NUMS.append(random_num)
count -= count
Assign.assign(random_num, member, member_list) |
the-stack_0_10297 | # coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import math
import sys
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import chain, islice, repeat
from .animations.utils import spinner_player
from .configuration import config_handler
@contextmanager
def alive_bar(total=None, title=None, calibrate=None, **options):
"""An alive progress bar to keep track of lengthy operations.
It has a spinner indicator, time elapsed, throughput and eta.
When the operation finishes, a receipt is displayed with statistics.
If the code is being executed in a headless environment, i.e. without a
connected tty, all features of the alive progress bar will be disabled
except for the final receipt.
Another cool feature is that it tracks the actual count against the
expected count. It will look different if you send more (or less) than
expected.
Also, the bar installs a hook in the system print function, which cleans
up any mixed-up output, allowing you to print() while using the bar.
And finally, it also does not show anything like `eta: 1584s`; it will nicely
show `eta: 0:26:24` as you would expect (but anything less than a minute
is indeed `eta: 42s`). :)
Use it like this:
>>> from alive_progress import alive_bar
... with alive_bar(<total>) as bar:
... for item in <iterable>:
... # process item
... bar() # makes the bar go forward
The `bar()` call is what makes the bar go forward. You can call it on every
iteration, or only when you choose to, depending on what you want to monitor.
While in a progress bar context, you have two ways to output messages:
- call `bar('text')`, which besides incrementing the counter, also
sets/overwrites an inline message within the bar;
- call `print('text')`, which prints an enriched message that includes
the current position of the progress bar, effectively leaving behind a
log and continuing the progress bar below it.
Both methods always clear the line appropriately to remove any garbage of
previous messages on screen.
If the bar is over or underused, it will warn you!
To test all supported scenarios, you can do this:
>>> for x in 1000, 1500, 700, 0:
... with alive_bar(x) as bar:
... for i in range(1000):
... time.sleep(.005)
... bar()
Expected results are these (but you have to see them in motion!):
[========================================] 3000/3000 [100%] in 7.4s (408.09/s)
[==============================! ] (!) 3000/4000 [75%] in 7.3s (408.90/s)
[========================================x (!) 3000/2000 [150%] in 7.4s (408.11/s)
[========================================] 3000 in 7.4s (407.54/s)
Args:
total (Optional[int]): the total expected count
title (Optional[str]): the title, will be printed whenever there's no custom message
calibrate (int): maximum theoretical throughput to calibrate animation speed
(cannot be in the global configuration because it depends on the current mode)
**options: custom configuration options, which override the global configuration:
length (int): number of characters to render the animated progress bar
spinner (Union[str | object]): spinner name in alive_progress.SPINNERS or custom
bar (Union[str | object]): bar name in alive_progress.BARS or custom
unknown (Union[str | object]): spinner name in alive_progress.SPINNERS or custom
theme (str): theme name in alive_progress.THEMES
force_tty (bool): runs animations even without a tty (pycharm terminal for example)
manual (bool): set to manually control percentage
"""
if total is not None:
if not isinstance(total, int):
raise TypeError("integer argument expected, got '{}'.".format(type(total).__name__))
if total <= 0:
total = None
config = config_handler(**options)
def to_elapsed():
return timedelta(seconds=int(run.elapsed)) if run.elapsed >= 60 else \
'{:.1f}s'.format(run.elapsed) if end else '{}s'.format(int(run.elapsed))
def clear_traces():
sys.__stdout__.write('\033[2K\r')
def run():
player = spinner_player(config.spinner())
while thread:
event.wait()
alive_repr(next(player))
time.sleep(1. / fps())
def alive_repr(spin=''):
update_data()
line = '{} {}{}{} in {} {} {}'.format(
bar_repr(run.percent, end), spin, spin and ' ' or '',
monitor(), to_elapsed(), run.stats(), run.text or title or ''
)
line_len = len(line)
with print_lock:
if line_len < run.last_line_len:
clear_traces()
sys.__stdout__.write(line + (spin and '\r' or '\n'))
sys.__stdout__.flush()
run.last_line_len = line_len
def flush_buffer():
if print_buffer:
print()
def sanitize_text(text):
return ' '.join(str(text).splitlines())
if config.manual:
def bar(perc=None, text=None):
if perc is not None:
flush_buffer()
run.percent = float(perc)
if text is not None:
run.text = sanitize_text(text)
return run.percent
else:
def bar(text=None, incr=1):
if incr > 0:
flush_buffer()
run.count += int(incr)
if text is not None:
run.text = sanitize_text(text)
return run.count
def print_hook(part):
if part != '\n':
# this will generate a sequence of lines interspersed with None, which will later
# be rendered as the indent filler to align additional lines under the same header.
gen = chain.from_iterable(zip(repeat(None), part.splitlines(True)))
print_buffer.extend(islice(gen, 1, None))
else:
header = header_template.format(run.count)
nested = ''.join(line or ' ' * len(header) for line in print_buffer)
with print_lock:
clear_traces()
sys.__stdout__.write('{}{}\n'.format(header, nested))
print_buffer[:] = []
print_buffer, print_lock = [], threading.Lock()
header_template = 'on {}: ' if config.enrich_print else ''
print_hook.write = print_hook
print_hook.flush = lambda: None
print_hook.isatty = sys.__stdout__.isatty
def start_monitoring(offset=0.):
sys.stdout = print_hook
event.set()
run.init = time.time() - offset
def stop_monitoring(clear):
if clear:
event.clear()
sys.stdout = sys.__stdout__
return time.time() - run.init
thread, event = None, threading.Event()
if sys.stdout.isatty() or config.force_tty:
@contextmanager
def pause_monitoring():
offset = stop_monitoring(True)
alive_repr()
yield
start_monitoring(offset)
bar.pause = pause_monitoring
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
def update_data():
update_hook()
run.elapsed = time.time() - run.init
run.rate = current() / run.elapsed if run.elapsed else 0.
run.eta_text = eta_text()
if total or config.manual: # we can track progress and therefore eta.
def eta_text():
if run.rate:
eta = (logic_total - current()) / run.rate
if eta >= 0:
return '{:.0f}s'.format(eta) if eta < 60 \
else timedelta(seconds=math.ceil(eta))
return '?'
bar_repr = config.bar(config.length)
stats = lambda: '({:.1{}}/s, eta: {})'.format(run.rate, format_spec, run.eta_text) # noqa
else: # unknown progress.
eta_text = lambda: None # noqa
bar_repr = config.unknown(config.length, config.bar)
stats = lambda: '({:.1f}/s)'.format(run.rate) # noqa
stats_end = lambda: '({:.2{}}/s)'.format(run.rate, format_spec) # noqa
if total or not config.manual: # we can count items.
logic_total, format_spec, factor, current = total, 'f', 1.e6, lambda: run.count # noqa
else: # there's only a manual percentage.
logic_total, format_spec, factor, current = 1., '%', 1., lambda: run.percent # noqa
# calibration of the dynamic fps engine.
# I've started with the equation y = log10(x + m) * k + n, where:
# y is the desired fps, m and n are horizontal and vertical translation,
# k is a calibration factor, computed from some user input c (see readme for details).
# considering minfps and maxfps as given constants, I came to:
# fps = log10(x + 1) * k + minfps, which must be equal to maxfps for x = c,
# so the factor k = (maxfps - minfps) / log10(c + 1), and
# fps = log10(x + 1) * (maxfps - minfps) / log10(c + 1) + minfps
# neat! ;)
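# worked example (illustrative numbers, not from the original comment): in the
# default counting mode calibrate ends up as 1e6, so adjust_log_curve == 1 and
# factor == (60 - 2) / log10(1e6 + 1) ~= 9.7; a measured rate of 1000 items/s
# then gives fps ~= log10(1001) * 9.7 + 2 ~= 31 frames per second.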
min_fps, max_fps = 2., 60.
calibrate = max(0., calibrate or factor)
adjust_log_curve = 100. / min(calibrate, 100.) # adjust curve for small numbers
factor = (max_fps - min_fps) / math.log10((calibrate * adjust_log_curve) + 1.)
def fps():
if run.rate <= 0:
return 10. # bootstrap speed
if run.rate < calibrate:
return math.log10((run.rate * adjust_log_curve) + 1.) * factor + min_fps
return max_fps
end, run.text, run.eta_text, run.stats = False, '', '', stats
run.count, run.last_line_len = 0, 0
run.percent, run.rate, run.init, run.elapsed = 0., 0., 0., 0.
if total:
if config.manual:
def update_hook():
run.count = int(math.ceil(run.percent * total))
else:
def update_hook():
run.percent = run.count / total
monitor = lambda: '{}{}/{} [{:.0%}]'.format( # noqa
'(!) ' if end and run.count != total else '', run.count, total, run.percent
)
elif config.manual:
update_hook = lambda: None # noqa
monitor = lambda: '{}{:.0%}'.format( # noqa
'(!) ' if end and run.percent != 1. else '', run.percent
)
else:
run.percent = 1.
update_hook = lambda: None # noqa
monitor = lambda: '{}'.format(run.count) # noqa
start_monitoring()
try:
yield bar
finally:
flush_buffer()
stop_monitoring(False)
if thread:
local_copy = thread
thread = None # lets the internal thread terminate gracefully.
local_copy.join()
end, run.text, run.stats = True, '', stats_end
alive_repr()
|
the-stack_0_10299 | SPACE = 'space'
COMMENT = 'comment'
PLUS_ASSIGN = 'plus_assign'
PLUS = 'plus'
MOD_ASSIGN = 'mod_assign'
MOD = 'mod'
DIVISION_ASSIGN = 'div_assign'
DIVISION = 'div'
POW = 'pow'
MULT_ASSIGN = 'mult_assign'
MULT = 'mult'
NOT = 'not'
AND = 'and'
OR = 'or'
XOR = 'xor'
GREATER_EQUAL = 'greater_eq'
GREATER = 'greater'
LESS_EQUAL = 'less_eq'
LESS = 'less'
EQUAL = 'eq'
ASSIGN = 'assign'
NOT_EQUAL = 'not_eq'
BRACKET_OPEN = 'bracket_open'
BRACKET_CLOSE = 'bracket_close'
CURLY_BRACKET_OPEN = 'curly_bracket_open'
CURLY_BRACKET_CLOSE = 'curly_bracket_close'
SEMICOLON = 'semicolon'
CONCAT = 'concat'
ADD = 'add'
IF = 'if'
ELSE = 'else'
WHILE = 'while'
PRINT = 'print'
INPUT = 'input'
BOOL = 'bool'
STRING = 'string'
MINUS_ASSIGN = 'minus_assign'
FLOAT = 'float'
INT = 'int'
MINUS = 'minus'
VARIABLE = 'var'
|
the-stack_0_10300 | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .connectors import Connection, StageConnectionDescription, create_connection_description
__all__ = [
'Connection',
'StageConnectionDescription',
'create_connection_description'
]
|
the-stack_0_10301 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unittest.mock import patch
import pandas as pd
from ax.core.base_trial import BaseTrial, TrialStatus
from ax.core.data import Data
from ax.core.generator_run import GeneratorRun, GeneratorRunType
from ax.utils.common.testutils import TestCase
from ax.utils.testing.core_stubs import get_arms, get_experiment, get_objective
TEST_DATA = Data(
df=pd.DataFrame(
[
{
"arm_name": "0_0",
"metric_name": get_objective().metric.name,
"mean": 1.0,
"sem": 2.0,
"trial_index": 0,
}
]
)
)
class TrialTest(TestCase):
def setUp(self):
self.experiment = get_experiment()
self.trial = self.experiment.new_trial()
self.arm = get_arms()[0]
self.trial.add_arm(self.arm)
def test_eq(self):
new_trial = self.experiment.new_trial()
self.assertNotEqual(self.trial, new_trial)
def test_basic_properties(self):
self.assertEqual(self.experiment, self.trial.experiment)
self.assertEqual(self.trial.index, 0)
self.assertEqual(self.trial.status, TrialStatus.CANDIDATE)
self.assertIsNotNone(self.trial.time_created)
self.assertEqual(self.trial.arms_by_name["0_0"], self.trial.arm)
self.assertEqual(self.trial.arms, [self.arm])
self.assertEqual(self.trial.abandoned_arms, [])
self.assertEqual(
self.trial.generator_run.generator_run_type, GeneratorRunType.MANUAL.name
)
# Test empty arms
with self.assertRaises(AttributeError):
self.experiment.new_trial().arm_weights
self.trial._status = TrialStatus.COMPLETED
self.assertTrue(self.trial.status.is_completed)
self.assertTrue(self.trial.completed_successfully)
def test_adding_new_trials(self):
new_arm = get_arms()[1]
new_trial = self.experiment.new_trial(
generator_run=GeneratorRun(arms=[new_arm])
)
with self.assertRaises(ValueError):
self.experiment.new_trial(generator_run=GeneratorRun(arms=get_arms()))
self.assertEqual(new_trial.arms_by_name["1_0"], new_arm)
with self.assertRaises(KeyError):
self.trial.arms_by_name["1_0"]
def test_add_trial_same_arm(self):
# Check that adding new arm w/out name works correctly.
new_trial1 = self.experiment.new_trial(
generator_run=GeneratorRun(arms=[self.arm.clone(clear_name=True)])
)
self.assertEqual(new_trial1.arm.name, self.trial.arm.name)
self.assertFalse(new_trial1.arm is self.trial.arm)
# Check that adding new arm with name works correctly.
new_trial2 = self.experiment.new_trial(
generator_run=GeneratorRun(arms=[self.arm.clone()])
)
self.assertEqual(new_trial2.arm.name, self.trial.arm.name)
self.assertFalse(new_trial2.arm is self.trial.arm)
arm_wrong_name = self.arm.clone(clear_name=True)
arm_wrong_name.name = "wrong_name"
with self.assertRaises(ValueError):
new_trial2 = self.experiment.new_trial(
generator_run=GeneratorRun(arms=[arm_wrong_name])
)
def test_abandonment(self):
self.assertFalse(self.trial.status.is_abandoned)
self.trial.mark_abandoned(reason="testing")
self.assertTrue(self.trial.status.is_abandoned)
self.assertFalse(self.trial.status.is_failed)
self.assertTrue(self.trial.did_not_complete)
@patch(
f"{BaseTrial.__module__}.{BaseTrial.__name__}.fetch_data",
return_value=TEST_DATA,
)
def test_objective_mean(self, _mock):
self.assertEqual(self.trial.objective_mean, 1.0)
@patch(
f"{BaseTrial.__module__}.{BaseTrial.__name__}.fetch_data", return_value=Data()
)
def test_objective_mean_empty_df(self, _mock):
with self.assertRaisesRegex(ValueError, "No data was retrieved for trial"):
self.assertIsNone(self.trial.objective_mean)
def testRepr(self):
repr_ = (
"Trial(experiment_name='test', index=0, "
"status=TrialStatus.CANDIDATE, arm=Arm(name='0_0', "
"parameters={'w': 0.85, 'x': 1, 'y': 'baz', 'z': False}))"
)
self.assertEqual(str(self.trial), repr_)
|
the-stack_0_10302 | # Copyright 2015 PLUMgrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import MutableMapping
import ctypes as ct
import multiprocessing
import os
from .libbcc import lib, _RAW_CB_TYPE
from .perf import Perf
from subprocess import check_output
BPF_MAP_TYPE_HASH = 1
BPF_MAP_TYPE_ARRAY = 2
BPF_MAP_TYPE_PROG_ARRAY = 3
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4
BPF_MAP_TYPE_PERCPU_HASH = 5
BPF_MAP_TYPE_PERCPU_ARRAY = 6
BPF_MAP_TYPE_STACK_TRACE = 7
BPF_MAP_TYPE_CGROUP_ARRAY = 8
BPF_MAP_TYPE_LRU_HASH = 9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 10
stars_max = 40
log2_index_max = 65
linear_index_max = 1025
# helper functions, consider moving these to a utils module
def _stars(val, val_max, width):
i = 0
text = ""
while (1):
if (i > (width * val / val_max) - 1) or (i > width - 1):
break
text += "*"
i += 1
if val > val_max:
text = text[:-1] + "+"
return text
def _print_log2_hist(vals, val_type):
global stars_max
log2_dist_max = 64
idx_max = -1
val_max = 0
for i, v in enumerate(vals):
if v > 0: idx_max = i
if v > val_max: val_max = v
if idx_max <= 32:
header = " %-19s : count distribution"
body = "%10d -> %-10d : %-8d |%-*s|"
stars = stars_max
else:
header = " %-29s : count distribution"
body = "%20d -> %-20d : %-8d |%-*s|"
stars = int(stars_max / 2)
if idx_max > 0:
print(header % val_type);
for i in range(1, idx_max + 1):
low = (1 << i) >> 1
high = (1 << i) - 1
if (low == high):
low -= 1
val = vals[i]
print(body % (low, high, val, stars,
_stars(val, val_max, stars)))
def _print_linear_hist(vals, val_type):
global stars_max
log2_dist_max = 64
idx_max = -1
val_max = 0
for i, v in enumerate(vals):
if v > 0: idx_max = i
if v > val_max: val_max = v
header = " %-13s : count distribution"
body = " %-10d : %-8d |%-*s|"
stars = stars_max
if idx_max >= 0:
print(header % val_type);
for i in range(0, idx_max + 1):
val = vals[i]
print(body % (i, val, stars,
_stars(val, val_max, stars)))
def Table(bpf, map_id, map_fd, keytype, leaftype, **kwargs):
"""Table(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
Create a python object out of a reference to a bpf table handle"""
ttype = lib.bpf_table_type_id(bpf.module, map_id)
t = None
if ttype == BPF_MAP_TYPE_HASH:
t = HashTable(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_ARRAY:
t = Array(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_PROG_ARRAY:
t = ProgArray(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_PERF_EVENT_ARRAY:
t = PerfEventArray(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_PERCPU_HASH:
t = PerCpuHash(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
elif ttype == BPF_MAP_TYPE_PERCPU_ARRAY:
t = PerCpuArray(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
elif ttype == BPF_MAP_TYPE_STACK_TRACE:
t = StackTrace(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_LRU_HASH:
t = LruHash(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_LRU_PERCPU_HASH:
t = LruPerCpuHash(bpf, map_id, map_fd, keytype, leaftype)
if t == None:
raise Exception("Unknown table type %d" % ttype)
return t
class TableBase(MutableMapping):
def __init__(self, bpf, map_id, map_fd, keytype, leaftype):
self.bpf = bpf
self.map_id = map_id
self.map_fd = map_fd
self.Key = keytype
self.Leaf = leaftype
self.ttype = lib.bpf_table_type_id(self.bpf.module, self.map_id)
self.flags = lib.bpf_table_flags_id(self.bpf.module, self.map_id)
self._cbs = {}
def key_sprintf(self, key):
key_p = ct.pointer(key)
buf = ct.create_string_buffer(ct.sizeof(self.Key) * 8)
res = lib.bpf_table_key_snprintf(self.bpf.module, self.map_id,
buf, len(buf), key_p)
if res < 0:
raise Exception("Could not printf key")
return buf.value
def leaf_sprintf(self, leaf):
leaf_p = ct.pointer(leaf)
buf = ct.create_string_buffer(ct.sizeof(self.Leaf) * 8)
res = lib.bpf_table_leaf_snprintf(self.bpf.module, self.map_id,
buf, len(buf), leaf_p)
if res < 0:
raise Exception("Could not printf leaf")
return buf.value
def key_scanf(self, key_str):
key = self.Key()
key_p = ct.pointer(key)
res = lib.bpf_table_key_sscanf(self.bpf.module, self.map_id,
key_str, key_p)
if res < 0:
raise Exception("Could not scanf key")
return key
def leaf_scanf(self, leaf_str):
leaf = self.Leaf()
leaf_p = ct.pointer(leaf)
res = lib.bpf_table_leaf_sscanf(self.bpf.module, self.map_id,
leaf_str, leaf_p)
if res < 0:
raise Exception("Could not scanf leaf")
return leaf
def __getitem__(self, key):
key_p = ct.pointer(key)
leaf = self.Leaf()
leaf_p = ct.pointer(leaf)
res = lib.bpf_lookup_elem(self.map_fd,
ct.cast(key_p, ct.c_void_p),
ct.cast(leaf_p, ct.c_void_p))
if res < 0:
raise KeyError
return leaf
def __setitem__(self, key, leaf):
key_p = ct.pointer(key)
leaf_p = ct.pointer(leaf)
res = lib.bpf_update_elem(self.map_fd,
ct.cast(key_p, ct.c_void_p),
ct.cast(leaf_p, ct.c_void_p), 0)
if res < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Could not update table: %s" % errstr)
# override the MutableMapping's implementation of these since they
# don't handle KeyError nicely
def itervalues(self):
for key in self:
# a map entry may be deleted in between discovering the key and
# fetching the value, suppress such errors
try:
yield self[key]
except KeyError:
pass
def iteritems(self):
for key in self:
try:
yield (key, self[key])
except KeyError:
pass
def items(self):
return [item for item in self.iteritems()]
def values(self):
return [value for value in self.itervalues()]
def clear(self):
# default clear uses popitem, which can race with the bpf prog
for k in self.keys():
self.__delitem__(k)
def zero(self):
# Even though this is not very efficient, we grab the entire list of
# keys before enumerating it. This helps avoid a potential race where
# the leaf assignment changes a hash table bucket that is being
# enumerated by the same loop, and may lead to a hang.
for k in list(self.keys()):
self[k] = self.Leaf()
def __iter__(self):
return TableBase.Iter(self, self.Key)
def iter(self): return self.__iter__()
def keys(self): return self.__iter__()
class Iter(object):
def __init__(self, table, keytype):
self.Key = keytype
self.table = table
k = self.Key()
kp = ct.pointer(k)
# if 0 is a valid key, try a few alternatives
if k in table:
ct.memset(kp, 0xff, ct.sizeof(k))
if k in table:
ct.memset(kp, 0x55, ct.sizeof(k))
if k in table:
raise Exception("Unable to allocate iterator")
self.key = k
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
self.key = self.table.next(self.key)
return self.key
def next(self, key):
next_key = self.Key()
next_key_p = ct.pointer(next_key)
key_p = ct.pointer(key)
res = lib.bpf_get_next_key(self.map_fd,
ct.cast(key_p, ct.c_void_p),
ct.cast(next_key_p, ct.c_void_p))
if res < 0:
raise StopIteration()
return next_key
def print_log2_hist(self, val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None):
"""print_log2_hist(val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None)
Prints a table as a log2 histogram. The table must be stored as
log2. The val_type argument is optional, and is a column header.
If the histogram has a secondary key, multiple tables will print
and section_header can be used as a header description for each.
If section_print_fn is not None, it will be passed the bucket value
to format into a string as it sees fit. If bucket_fn is not None,
it will be used to produce a bucket value for the histogram keys.
The maximum index allowed is log2_index_max (65), which will
accommodate any 64-bit integer in the histogram.
"""
if isinstance(self.Key(), ct.Structure):
tmp = {}
f1 = self.Key._fields_[0][0]
f2 = self.Key._fields_[1][0]
for k, v in self.items():
bucket = getattr(k, f1)
if bucket_fn:
bucket = bucket_fn(bucket)
vals = tmp[bucket] = tmp.get(bucket, [0] * log2_index_max)
slot = getattr(k, f2)
vals[slot] = v.value
for bucket, vals in tmp.items():
if section_print_fn:
print("\n%s = %s" % (section_header,
section_print_fn(bucket)))
else:
print("\n%s = %r" % (section_header, bucket))
_print_log2_hist(vals, val_type)
else:
vals = [0] * log2_index_max
for k, v in self.items():
vals[k.value] = v.value
_print_log2_hist(vals, val_type)
def print_linear_hist(self, val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None):
"""print_linear_hist(val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None)
Prints a table as a linear histogram. This is intended to span integer
ranges, eg, from 0 to 100. The val_type argument is optional, and is a
column header. If the histogram has a secondary key, multiple tables
will print and section_header can be used as a header description for
each. If section_print_fn is not None, it will be passed the bucket
value to format into a string as it sees fit. If bucket_fn is not None,
it will be used to produce a bucket value for the histogram keys.
The maximum index allowed is linear_index_max (1025), which is hoped
to be sufficient for integer ranges spanned.
"""
if isinstance(self.Key(), ct.Structure):
tmp = {}
f1 = self.Key._fields_[0][0]
f2 = self.Key._fields_[1][0]
for k, v in self.items():
bucket = getattr(k, f1)
if bucket_fn:
bucket = bucket_fn(bucket)
vals = tmp[bucket] = tmp.get(bucket, [0] * linear_index_max)
slot = getattr(k, f2)
vals[slot] = v.value
for bucket, vals in tmp.items():
if section_print_fn:
print("\n%s = %s" % (section_header,
section_print_fn(bucket)))
else:
print("\n%s = %r" % (section_header, bucket))
_print_linear_hist(vals, val_type)
else:
vals = [0] * linear_index_max
for k, v in self.items():
try:
vals[k.value] = v.value
except IndexError:
# Improve error text. If the limit proves a nuisance, this
# function can be rewritten to avoid having one.
raise IndexError(("Index in print_linear_hist() of %d " +
"exceeds max of %d.") % (k.value, linear_index_max))
_print_linear_hist(vals, val_type)
class HashTable(TableBase):
def __init__(self, *args, **kwargs):
super(HashTable, self).__init__(*args, **kwargs)
def __len__(self):
i = 0
for k in self: i += 1
return i
def __delitem__(self, key):
key_p = ct.pointer(key)
res = lib.bpf_delete_elem(self.map_fd, ct.cast(key_p, ct.c_void_p))
if res < 0:
raise KeyError
class LruHash(HashTable):
def __init__(self, *args, **kwargs):
super(LruHash, self).__init__(*args, **kwargs)
class ArrayBase(TableBase):
def __init__(self, *args, **kwargs):
super(ArrayBase, self).__init__(*args, **kwargs)
self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module,
self.map_id))
def _normalize_key(self, key):
if isinstance(key, int):
if key < 0:
key = len(self) + key
key = self.Key(key)
if not isinstance(key, ct._SimpleCData):
raise IndexError("Array index must be an integer type")
if key.value >= len(self):
raise IndexError("Array index out of range")
return key
def __len__(self):
return self.max_entries
def __getitem__(self, key):
key = self._normalize_key(key)
return super(ArrayBase, self).__getitem__(key)
def __setitem__(self, key, leaf):
key = self._normalize_key(key)
super(ArrayBase, self).__setitem__(key, leaf)
def __delitem__(self, key):
key = self._normalize_key(key)
key_p = ct.pointer(key)
# Deleting from array type maps does not have an effect, so
# zero out the entry instead.
leaf = self.Leaf()
leaf_p = ct.pointer(leaf)
res = lib.bpf_update_elem(self.map_fd, ct.cast(key_p, ct.c_void_p),
ct.cast(leaf_p, ct.c_void_p), 0)
if res < 0:
raise Exception("Could not clear item")
def __iter__(self):
return ArrayBase.Iter(self, self.Key)
class Iter(object):
def __init__(self, table, keytype):
self.Key = keytype
self.table = table
self.i = -1
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
self.i += 1
if self.i == len(self.table):
raise StopIteration()
return self.Key(self.i)
class Array(ArrayBase):
def __init__(self, *args, **kwargs):
super(Array, self).__init__(*args, **kwargs)
class ProgArray(ArrayBase):
def __init__(self, *args, **kwargs):
super(ProgArray, self).__init__(*args, **kwargs)
def __setitem__(self, key, leaf):
if isinstance(leaf, int):
leaf = self.Leaf(leaf)
if isinstance(leaf, self.bpf.Function):
leaf = self.Leaf(leaf.fd)
super(ProgArray, self).__setitem__(key, leaf)
class PerfEventArray(ArrayBase):
class Event(object):
def __init__(self, typ, config):
self.typ = typ
self.config = config
HW_CPU_CYCLES = Event(Perf.PERF_TYPE_HARDWARE, 0)
HW_INSTRUCTIONS = Event(Perf.PERF_TYPE_HARDWARE, 1)
HW_CACHE_REFERENCES = Event(Perf.PERF_TYPE_HARDWARE, 2)
HW_CACHE_MISSES = Event(Perf.PERF_TYPE_HARDWARE, 3)
HW_BRANCH_INSTRUCTIONS = Event(Perf.PERF_TYPE_HARDWARE, 4)
HW_BRANCH_MISSES = Event(Perf.PERF_TYPE_HARDWARE, 5)
HW_BUS_CYCLES = Event(Perf.PERF_TYPE_HARDWARE, 6)
HW_STALLED_CYCLES_FRONTEND = Event(Perf.PERF_TYPE_HARDWARE, 7)
HW_STALLED_CYCLES_BACKEND = Event(Perf.PERF_TYPE_HARDWARE, 8)
HW_REF_CPU_CYCLES = Event(Perf.PERF_TYPE_HARDWARE, 9)
# not yet supported, wip
#HW_CACHE_L1D_READ = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|0<<8|0<<16)
#HW_CACHE_L1D_READ_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|0<<8|1<<16)
#HW_CACHE_L1D_WRITE = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|1<<8|0<<16)
#HW_CACHE_L1D_WRITE_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|1<<8|1<<16)
#HW_CACHE_L1D_PREF = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|2<<8|0<<16)
#HW_CACHE_L1D_PREF_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|2<<8|1<<16)
#HW_CACHE_L1I_READ = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|0<<8|0<<16)
#HW_CACHE_L1I_READ_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|0<<8|1<<16)
#HW_CACHE_L1I_WRITE = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|1<<8|0<<16)
#HW_CACHE_L1I_WRITE_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|1<<8|1<<16)
#HW_CACHE_L1I_PREF = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|2<<8|0<<16)
#HW_CACHE_L1I_PREF_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|2<<8|1<<16)
#HW_CACHE_LL_READ = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|0<<8|0<<16)
#HW_CACHE_LL_READ_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|0<<8|1<<16)
#HW_CACHE_LL_WRITE = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|1<<8|0<<16)
#HW_CACHE_LL_WRITE_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|1<<8|1<<16)
#HW_CACHE_LL_PREF = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|2<<8|0<<16)
#HW_CACHE_LL_PREF_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|2<<8|1<<16)
def __init__(self, *args, **kwargs):
super(PerfEventArray, self).__init__(*args, **kwargs)
def __delitem__(self, key):
super(PerfEventArray, self).__delitem__(key)
self.close_perf_buffer(key)
def open_perf_buffer(self, callback):
"""open_perf_buffers(callback)
Opens a set of per-cpu ring buffer to receive custom perf event
data from the bpf program. The callback will be invoked for each
event submitted from the kernel, up to millions per second.
"""
for i in range(0, multiprocessing.cpu_count()):
self._open_perf_buffer(i, callback)
def _open_perf_buffer(self, cpu, callback):
fn = _RAW_CB_TYPE(lambda _, data, size: callback(cpu, data, size))
reader = lib.bpf_open_perf_buffer(fn, None, -1, cpu)
if not reader:
raise Exception("Could not open perf buffer")
fd = lib.perf_reader_fd(reader)
self[self.Key(cpu)] = self.Leaf(fd)
self.bpf._add_kprobe((id(self), cpu), reader)
        # keep a reference to the ctypes callback so it is not garbage collected
self._cbs[cpu] = fn
def close_perf_buffer(self, key):
reader = self.bpf.open_kprobes.get((id(self), key))
if reader:
lib.perf_reader_free(reader)
self.bpf._del_kprobe((id(self), key))
del self._cbs[key]
def _open_perf_event(self, cpu, typ, config):
fd = lib.bpf_open_perf_event(typ, config, -1, cpu)
if fd < 0:
raise Exception("bpf_open_perf_event failed")
try:
self[self.Key(cpu)] = self.Leaf(fd)
finally:
# the fd is kept open in the map itself by the kernel
os.close(fd)
def open_perf_event(self, ev):
"""open_perf_event(ev)
Configures the table such that calls from the bpf program to
table.perf_read(bpf_get_smp_processor_id()) will return the hardware
counter denoted by event ev on the local cpu.
"""
if not isinstance(ev, self.Event):
raise Exception("argument must be an Event, got %s", type(ev))
for i in range(0, multiprocessing.cpu_count()):
self._open_perf_event(i, ev.typ, ev.config)
class PerCpuHash(HashTable):
def __init__(self, *args, **kwargs):
self.reducer = kwargs.pop("reducer", None)
super(PerCpuHash, self).__init__(*args, **kwargs)
self.sLeaf = self.Leaf
self.total_cpu = multiprocessing.cpu_count()
        # Per-CPU value size must be a multiple of 8 bytes, as hard-coded in
        # the Linux kernel.
        self.alignment = ct.sizeof(self.sLeaf) % 8
        if self.alignment == 0:
self.Leaf = self.sLeaf * self.total_cpu
else:
# Currently Float, Char, un-aligned structs are not supported
if self.sLeaf == ct.c_uint:
self.Leaf = ct.c_uint64 * self.total_cpu
elif self.sLeaf == ct.c_int:
self.Leaf = ct.c_int64 * self.total_cpu
else:
raise IndexError("Leaf must be aligned to 8 bytes")
def getvalue(self, key):
result = super(PerCpuHash, self).__getitem__(key)
        if self.alignment == 0:
ret = result
else:
ret = (self.sLeaf * self.total_cpu)()
for i in range(0, self.total_cpu):
ret[i] = result[i]
return ret
def __getitem__(self, key):
if self.reducer:
return reduce(self.reducer, self.getvalue(key))
else:
return self.getvalue(key)
def __setitem__(self, key, leaf):
super(PerCpuHash, self).__setitem__(key, leaf)
def sum(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default sum functions")
return self.sLeaf(reduce(lambda x,y: x+y, self.getvalue(key)))
def max(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default max functions")
return self.sLeaf(max(self.getvalue(key)))
def average(self, key):
result = self.sum(key)
        result.value /= self.total_cpu
return result
class LruPerCpuHash(PerCpuHash):
def __init__(self, *args, **kwargs):
super(LruPerCpuHash, self).__init__(*args, **kwargs)
class PerCpuArray(ArrayBase):
def __init__(self, *args, **kwargs):
self.reducer = kwargs.pop("reducer", None)
super(PerCpuArray, self).__init__(*args, **kwargs)
self.sLeaf = self.Leaf
self.total_cpu = multiprocessing.cpu_count()
        # Per-CPU value size must be a multiple of 8 bytes, as hard-coded in
        # the Linux kernel.
        self.alignment = ct.sizeof(self.sLeaf) % 8
        if self.alignment == 0:
self.Leaf = self.sLeaf * self.total_cpu
else:
# Currently Float, Char, un-aligned structs are not supported
if self.sLeaf == ct.c_uint:
self.Leaf = ct.c_uint64 * self.total_cpu
elif self.sLeaf == ct.c_int:
self.Leaf = ct.c_int64 * self.total_cpu
else:
raise IndexError("Leaf must be aligned to 8 bytes")
def getvalue(self, key):
result = super(PerCpuArray, self).__getitem__(key)
        if self.alignment == 0:
ret = result
else:
ret = (self.sLeaf * self.total_cpu)()
for i in range(0, self.total_cpu):
ret[i] = result[i]
return ret
def __getitem__(self, key):
        if self.reducer:
return reduce(self.reducer, self.getvalue(key))
else:
return self.getvalue(key)
def __setitem__(self, key, leaf):
super(PerCpuArray, self).__setitem__(key, leaf)
def sum(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default sum functions")
return self.sLeaf(reduce(lambda x,y: x+y, self.getvalue(key)))
def max(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default max functions")
return self.sLeaf(max(self.getvalue(key)))
def average(self, key):
result = self.sum(key)
        result.value /= self.total_cpu
return result
class StackTrace(TableBase):
MAX_DEPTH = 127
def __init__(self, *args, **kwargs):
super(StackTrace, self).__init__(*args, **kwargs)
class StackWalker(object):
def __init__(self, stack, resolve=None):
self.stack = stack
self.n = -1
self.resolve = resolve
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
self.n += 1
if self.n == StackTrace.MAX_DEPTH:
raise StopIteration()
addr = self.stack.ip[self.n]
            if addr == 0:
raise StopIteration()
return self.resolve(addr) if self.resolve else addr
def walk(self, stack_id, resolve=None):
return StackTrace.StackWalker(self[self.Key(stack_id)], resolve)
def __len__(self):
i = 0
for k in self: i += 1
return i
def __delitem__(self, key):
key_p = ct.pointer(key)
res = lib.bpf_delete_elem(self.map_fd, ct.cast(key_p, ct.c_void_p))
if res < 0:
raise KeyError
def clear(self):
pass
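# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the upstream
# module). The BPF C program, the map name "counts" and the probed kernel
# symbol below are assumptions chosen for the example; the Python-side calls
# mirror the table classes defined above (HashTable iteration, and the
# per-CPU reduce helpers noted at the end).
if __name__ == "__main__":
    from bcc import BPF  # assumes bcc is installed and importable
    example_prog = """
    BPF_HASH(counts, u64, u64);
    int do_count(void *ctx) {
        u64 pid = bpf_get_current_pid_tgid() >> 32;
        counts.increment(pid);
        return 0;
    }
    """
    b = BPF(text=example_prog)
    b.attach_kprobe(event="finish_task_switch", fn_name="do_count")
    counts = b["counts"]  # resolves to a HashTable instance
    import time
    time.sleep(1)
    for k, v in sorted(counts.items(), key=lambda kv: kv[1].value):
        print("pid %d -> %d switches" % (k.value, v.value))
    # Per-CPU tables (PerCpuHash / PerCpuArray) additionally expose
    # table.sum(key), table.max(key) and table.average(key), or a custom
    # reducer callable (see the "reducer" keyword handled above).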
|
the-stack_0_10303 | # (C) Copyright 2017 IBM Corp.
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Click command definitions for the server command group, which includes
commands for inspection and management of the objects defined by the pywbem
server class, including namespaces, WBEMServer information, and profile
information.
NOTE: Commands are ordered in help display by their order in this file.
"""
from __future__ import absolute_import, print_function
import os
import sys
import click
import six
from pywbem import Error, MOFCompiler, ModelError
from pywbem._mof_compiler import MOFWBEMConnection, MOFCompileError
from pywbem._nocasedict import NocaseDict
from nocaselist import NocaseList
from .pywbemcli import cli
from ._common import pywbem_error_exception, parse_version_value, \
is_experimental_class
from ._common_options import namespace_option
from ._cmd_namespace import cmd_namespace_list, cmd_namespace_interop
from .._utils import pywbemtools_warn
from .._click_extensions import PywbemtoolsGroup, PywbemtoolsCommand, \
CMD_OPTS_TXT, GENERAL_OPTS_TXT, SUBCMD_HELP_TXT
from .._options import add_options, help_option
from .._output_formatting import validate_output_format, format_table, \
display_text, fold_strings
# NOTE: A number of the options use double-dash as the short form. In those
# cases, a third definition of the option without the double-dash defines
# the corresponding option name, e.g. 'include_qualifiers'. It should be
# defined with underscores and not dashes.
# Issue 224 - Exception in prompt-toolkit with Python 2.7. Caused because
# with prompt-toolkit 2+ the completer requires unicode, and click_repl does
# not pass option help text as unicode.
# NOTE: Ensure that all option help attributes are unicode to get around this
# issue
#
# Common option definitions for server group
#
mof_include_option = [ # pylint: disable=invalid-name
click.option('--include', '-I', metavar='INCLUDEDIR', multiple=True,
help=u'Path name of a MOF include directory. '
'May be specified multiple times.')]
mof_dry_run_option = [ # pylint: disable=invalid-name
click.option('--dry-run', '-d', is_flag=True, default=False,
help=u'Enable dry-run mode: Don\'t actually modify the '
'server. Connection to the server is still required for '
'reading.')]
@cli.group('server', cls=PywbemtoolsGroup, options_metavar=GENERAL_OPTS_TXT,
subcommand_metavar=SUBCMD_HELP_TXT)
@add_options(help_option)
def server_group():
"""
Command group for WBEM servers.
This command group defines commands to inspect and manage core components
of a WBEM server including server attributes, namespaces, compiling MOF,
the Interop namespace and schema information.
In addition to the command-specific options shown in this help text, the
general options (see 'pywbemcli --help') can also be specified before the
'server' keyword.
"""
pass # pylint: disable=unnecessary-pass
@server_group.command('namespaces', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@add_options(help_option)
@click.pass_obj
def server_namespaces(context):
"""
List the namespaces of the server (deprecated).
The Interop namespace must exist on the server.
Deprecated: The 'server namespaces' command is deprecated and will be
removed in a future version. Use the 'namespace list' command instead.
"""
pywbemtools_warn(
"The 'server namespaces' command is deprecated and will be removed in "
"a future version. Use the 'namespace list' command instead.",
DeprecationWarning)
context.execute_cmd(lambda: cmd_namespace_list(context))
@server_group.command('interop', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@add_options(help_option)
@click.pass_obj
def server_interop(context):
"""
Get the Interop namespace of the server (deprecated).
The Interop namespace must exist on the server.
Deprecated: The 'server interop' command is deprecated and will be removed
in a future version. Use the 'namespace interop' command instead.
"""
pywbemtools_warn(
"The 'server interop' command is deprecated and will be removed in "
"a future version. Use the 'namespace interop' command instead.",
DeprecationWarning)
context.execute_cmd(lambda: cmd_namespace_interop(context))
@server_group.command('brand', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@add_options(help_option)
@click.pass_obj
def server_brand(context):
"""
Get the brand of the server.
Brand information is defined by the server implementor and may or may
not be available. Pywbem attempts to collect the brand information from
multiple sources.
"""
# pylint: disable=too-many-function-args
context.execute_cmd(lambda: cmd_server_brand(context))
@server_group.command('info', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@add_options(help_option)
@click.pass_obj
def server_info(context):
"""
Get information about the server.
The information includes CIM namespaces and server brand.
"""
context.execute_cmd(lambda: cmd_server_info(context))
@server_group.command('add-mof', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@click.argument('moffiles', metavar='MOFFILE', type=click.Path(),
nargs=-1, required=True)
@add_options(namespace_option)
@add_options(mof_include_option)
@add_options(mof_dry_run_option)
@add_options(help_option)
@click.pass_obj
def server_add_mof(context, **options):
"""
Compile MOF and add/update CIM objects in the server.
The MOF files are specified with the MOFFILE argument, which may be
specified multiple times. The minus sign ('-') specifies the standard
input.
Initially, the target namespace is the namespace specified with the
--namespace option or if not specified the default namespace of the
connection. If the MOF contains '#pragma namespace' directives, the target
namespace will be changed accordingly.
MOF include files (specified with the '#pragma include' directive) are
searched first in the directory of the including MOF file, and then in
the directories specified with the --include option.
Any CIM objects (instances, classes and qualifiers) specified in the MOF
files are created in the server, or modified if they already exist in the
server.
The global --verbose option will show the CIM objects that are created or
modified.
"""
context.execute_cmd(lambda: cmd_server_add_mof(context, options))
@server_group.command('remove-mof', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@click.argument('moffiles', metavar='MOFFILE', type=click.Path(),
nargs=-1, required=True)
@add_options(namespace_option)
@add_options(mof_include_option)
@add_options(mof_dry_run_option)
@add_options(help_option)
@click.pass_obj
def server_remove_mof(context, **options):
"""
Compile MOF and remove CIM objects from the server.
The MOF files are specified with the MOFFILE argument, which may be
specified multiple times. The minus sign ('-') specifies the standard
input.
Initially, the target namespace is the namespace specified with the
--namespace option or if not specified the default namespace of the
connection. If the MOF contains '#pragma namespace' directives, the target
namespace will be changed accordingly.
MOF include files (specified with the '#pragma include' directive) are
searched first in the directory of the including MOF file, and then in
the directories specified with the --include option.
Any CIM objects (instances, classes and qualifiers) specified in the MOF
files are deleted from the server.
The global --verbose option will show the CIM objects that are removed.
"""
context.execute_cmd(lambda: cmd_server_remove_mof(context, options))
@server_group.command('schema', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@add_options(namespace_option)
@click.option('-d', '--detail', is_flag=True, default=False,
help=u'Display details about each schema in the namespace rather '
u'than accumulated for the namespace.')
@add_options(help_option)
@click.pass_obj
def server_schema(context, **options):
"""
Get information about the server schemas.
Gets information about the schemas and CIM schemas that define the classes
in each namespace. The information provided includes:
* The released DMTF CIM schema version that was the source for the
qualifier declarations and classes for the namespace.
* Experimental vs. final elements in the schema
* Schema name (defined by the prefix on each class before the first '_')
* Class count
"""
context.execute_cmd(lambda: cmd_server_schema(context, options))
###############################################################
# Server cmds
###############################################################
def cmd_server_brand(context):
"""
Display product and version info of the current WBEM server
"""
wbem_server = context.pywbem_server.wbem_server
output_format = validate_output_format(context.output_format, 'TEXT')
try:
brand = wbem_server.brand
context.spinner_stop()
display_text(brand, output_format)
except Error as er:
raise pywbem_error_exception(er)
def cmd_server_info(context):
"""
Display general overview of info from current WBEM server
"""
wbem_server = context.pywbem_server.wbem_server
output_format = validate_output_format(context.output_format, 'TABLE')
try:
        # Fetch the namespaces to force contact with the server before
        # turning off the spinner.
namespaces = sorted(wbem_server.namespaces)
context.spinner_stop()
rows = []
headers = ['Brand', 'Version', 'Interop Namespace', 'Namespaces']
sep = '\n' if namespaces and len(namespaces) > 3 else ', '
namespaces = sep.join(namespaces)
rows.append([wbem_server.brand, wbem_server.version,
wbem_server.interop_ns,
namespaces])
click.echo(format_table(rows, headers,
title='Server General Information',
table_format=output_format))
except Error as er:
raise pywbem_error_exception(er)
def cmd_server_add_mof(context, options):
"""
Compile MOF and add/update CIM objects in the server.
"""
conn = context.pywbem_server.conn
try:
context.spinner_stop()
# Define the connection to be used by the MOF compiler.
# MOFWBEMConnection writes resulting CIM objects to a local store
# but reads from the connection.
if options['dry_run']:
comp_handle = MOFWBEMConnection(conn=conn)
else:
comp_handle = conn
if options['dry_run']:
print('Executing in dry-run mode')
include_dirs = []
for idir in options['include']:
if not os.path.isabs(idir):
idir = os.path.abspath(idir)
include_dirs.append(idir)
for moffile in options['moffiles']:
if moffile != '-':
mofdir = os.path.dirname(moffile)
if not os.path.isabs(mofdir):
mofdir = os.path.abspath(mofdir)
for idir in include_dirs:
if mofdir.startswith(idir):
break
else:
include_dirs.append(mofdir)
mofcomp = MOFCompiler(handle=comp_handle, search_paths=include_dirs,
verbose=context.verbose)
for moffile in options['moffiles']:
if moffile == '-':
mofstr = sys.stdin.read() # bytes in py2 / text in py3
if context.verbose:
print('Compiling MOF from standard input')
# The defaulting to the connection default namespace is handled
# inside of the MOF compiler.
mofcomp.compile_string(mofstr, options['namespace'])
else:
if not os.path.isabs(moffile):
moffile = os.path.abspath(moffile)
if context.verbose:
print('Compiling MOF file {0}'.format(moffile))
# The defaulting to the connection default namespace is handled
# inside of the MOF compiler.
mofcomp.compile_file(moffile, options['namespace'])
# If MOFCompileError, exception already logged by compile_string().
except MOFCompileError:
raise click.ClickException("Compile failed.")
# Otherwise display the exception itself
except Error as exc:
raise pywbem_error_exception(exc)
def cmd_server_remove_mof(context, options):
"""
Compile MOF and remove CIM objects from the server.
"""
conn = context.pywbem_server.conn
try:
context.spinner_stop()
# Define the connection to be used by the MOF compiler.
# MOFWBEMConnection writes resulting CIM objects to a local store
# but reads from the connection.
comp_handle = MOFWBEMConnection(conn=conn)
if options['dry_run']:
print('Executing in dry-run mode')
include_dirs = []
for idir in options['include']:
if not os.path.isabs(idir):
idir = os.path.abspath(idir)
include_dirs.append(idir)
for moffile in options['moffiles']:
if moffile != '-':
mofdir = os.path.dirname(moffile)
if not os.path.isabs(mofdir):
mofdir = os.path.abspath(mofdir)
for idir in include_dirs:
if mofdir.startswith(idir):
break
else:
include_dirs.append(mofdir)
# verbose messages are displayed by rollback()
mofcomp = MOFCompiler(handle=comp_handle, search_paths=include_dirs,
verbose=False)
for moffile in options['moffiles']:
if moffile == '-':
mofstr = sys.stdin.read() # bytes in py2 / text in py3
if context.verbose:
print('Compiling MOF from standard input into cache')
# The defaulting to the connection default namespace is handled
# inside of the MOF compiler.
mofcomp.compile_string(mofstr, options['namespace'])
else:
if not os.path.isabs(moffile):
moffile = os.path.abspath(moffile)
if context.verbose:
print('Compiling MOF file {0} into cache'.format(moffile))
# The defaulting to the connection default namespace is handled
# inside of the MOF compiler.
mofcomp.compile_file(moffile, options['namespace'])
# rollback the compiled objects to remove them from the target.
if not options['dry_run']:
if context.verbose:
print('Deleting CIM objects found in MOF...')
comp_handle.rollback(verbose=context.verbose)
else:
if context.verbose:
print('No deletions will be shown in dry-run mode')
# If MOFCompileError, exception already logged by compile_string().
except MOFCompileError:
raise click.ClickException("Compile failed.")
except Error as exc:
raise pywbem_error_exception(exc)
def cmd_server_schema(context, options):
"""
    The schema command provides information on the CIM model in each namespace,
    including the CIM schemas defined, the DMTF released schema version, whether
    the namespace/schema includes classes with the experimental qualifier, and
    the count of classes for the namespace and for each schema.
"""
# The schema names that can be considered DMTF schemas and are part of
# the dmtf_cim_schema
possible_dmtf_schemas = NocaseList(['CIM', 'PRS'])
def experimental_display(value):
"""Return string Experimental or empty sting"""
return 'Experimental' if value else ''
def schema_display(schema):
"""Replace dummy name for no-schema with real text"""
if schema == "~~~":
return "(no-schema)"
return schema
def version_str(version_tuple):
"""Convert 3 integer tuple to string (1.2.3) or empty strig"""
if all(i == version_tuple[0] for i in version_tuple):
return ""
return ".".join([str(i) for i in version_tuple])
conn = context.pywbem_server.conn
wbem_server = context.pywbem_server.wbem_server
output_format = validate_output_format(context.output_format, 'TABLE')
namespace_opt = options['namespace']
    # Get namespaces. This bypasses the issue when there is no interop
    # namespace
try:
namespaces = [namespace_opt] if namespace_opt else \
wbem_server.namespaces
except ModelError:
namespaces = [wbem_server.conn.default_namespace]
detail = options['detail']
rows = []
for ns in sorted(namespaces):
klasses = conn.EnumerateClasses(namespace=ns, DeepInheritance=True,
LocalOnly=True)
classes_count = len(klasses)
# namespace level variables for experimental status and max version
ns_experimental = False
ns_max_dmtf_version = [0, 0, 0]
# Dictionaries for schemas, schema_max_version and experimental status
# per schema found in the namespaces
schemas = NocaseDict() # Schema names are case independent
schema_max_ver = NocaseDict()
schema_experimental = NocaseDict()
no_schema = []
for klass in klasses:
schema_elements = klass.classname.split('_', 1)
schema = schema_elements[0] if len(schema_elements) > 1 \
else "~~~" # this is dummy for sort that is replaced later.
schemas[schema] = schemas.get(schema, 0) + 1
if len(schema_elements) < 2:
no_schema.append(klass.classname)
if schema not in schema_max_ver:
schema_max_ver[schema] = [0, 0, 0]
this_class_experimental = False
# Determine if experimental qualifier exists and set namespace
# level experimental flag.
if ns_experimental is False:
if is_experimental_class(klass):
ns_experimental = True
this_class_experimental = True
# If detail, set the schema level experimental flag
if detail:
if schema not in schema_experimental:
schema_experimental[schema] = False
if this_class_experimental:
schema_experimental[schema] = True
elif ns_experimental:
if schema_experimental[schema] is False:
if is_experimental_class(klass):
schema_experimental[schema] = True
# Get the version qualifier for this class
if 'Version' in klass.qualifiers:
version = klass.qualifiers['Version'].value
version = parse_version_value(version, klass.classname)
                # update the namespace max version if this schema is a
                # DMTF schema and its version exceeds the previous maximum
if schema in possible_dmtf_schemas:
if version > ns_max_dmtf_version:
ns_max_dmtf_version = version
# update the version in the schema_max_ver dictionary
if schema not in schema_max_ver or \
version > schema_max_ver[schema]:
schema_max_ver[schema] = version
# Build the table formatted output
prev_namespace = None
ns_version_str = version_str(ns_max_dmtf_version) \
if classes_count else ""
if detail:
headers = ['Namespace', 'schemas', 'classes\ncount',
'schema\nversion', 'experimental']
# Display with a line for each namespace and one for each
# schema in the namespace
# replace the dummy "~~~" with the output text
for schema in sorted(schemas.keys()):
schema_max_ver_str = version_str(schema_max_ver[schema])
# Set the namespace in first row for each new namespace found
if ns != prev_namespace:
prev_namespace = ns
ns_display = ns
else:
ns_display = ""
# Append the row for each schema in the namespace
rows.append([ns_display, # namespace. don't repeat
schema_display(schema), # CIM schema
schemas[schema], #
schema_max_ver_str, # schema version
experimental_display(schema_experimental[schema])])
else: # display non-detail report
# Display one line for each namespace with list of schemas in the
# namespace
headers = ['Namespace', 'schemas', 'classes\ncount',
'CIM schema\nversion', 'experimental']
schemas_str = ", ".join(sorted(list(six.iterkeys(schemas))))
schemas_str = schemas_str.replace('~~~', '(no-schema)')
folded_schemas = fold_strings(schemas_str, 45,
fold_list_items=False)
rows.append([ns,
folded_schemas,
classes_count,
ns_version_str,
experimental_display(ns_experimental)
])
# if output_format_is_table(context.output_format):
title = "Schema information{0} namespaces: {1};".format(
'; detail;' if detail else ";", namespace_opt or "all")
context.spinner_stop()
click.echo(format_table(rows,
headers,
title=title,
table_format=output_format))
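# Illustrative sketch (editor's addition): the core compile path used by the
# 'server add-mof' / 'server remove-mof' commands above, reduced to plain
# pywbem calls outside of click. The server URL, namespace and MOF text are
# placeholders, and CIM_Foo is assumed to exist on the target server.
def _compile_mof_sketch():
    from pywbem import WBEMConnection
    conn = WBEMConnection('http://localhost:5988',
                          default_namespace='root/cimv2')
    # MOFWBEMConnection stages compiled objects in a local store while still
    # reading dependencies from the live connection (the dry-run/remove path).
    comp_handle = MOFWBEMConnection(conn=conn)
    mofcomp = MOFCompiler(handle=comp_handle, search_paths=[], verbose=True)
    mofcomp.compile_string('instance of CIM_Foo { Name = "x"; };',
                           'root/cimv2')
    # For the remove path, rollback() then deletes the compiled objects from
    # the target server; for add-mof the live connection is passed as the
    # handle instead, so objects are created/updated directly.
    comp_handle.rollback(verbose=True)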
|
the-stack_0_10304 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from soql.attributes import Integer, Relationship, String
from soql import Model
from soql import select
from soql import SelectClauseIsntValidSubquery
from soql import asc, desc, nulls_first, nulls_last
from tests.helpers import SoqlAssertions
class Grandparent(Model):
id = Integer('Id')
class Parent(Model):
id = Integer('Id')
name = String('Name')
age = Integer('Age')
mom = Relationship('Mom', related_model=Grandparent)
class Child(Model):
id = Integer('Id')
name = String('Name')
mom = Relationship('Mom', related_model=Parent)
dad = Relationship('Dad', related_model=Parent)
teacher = Relationship('Teacher', related_model='Teacher')
class Teacher(Model):
id = Integer('Id')
students = Relationship('Students', related_model=Child, many=True)
class SelectTest(unittest.TestCase, SoqlAssertions):
def test_select(self):
self.assertSoqlEqual(
select(Child),
"SELECT Child.Id, Child.Name "
"FROM Child"
)
def test_joins(self):
self.assertSoqlEqual(
select(Child).join(Child.mom),
"SELECT Child.Id, Child.Name, Child.Mom.Age, Child.Mom.Id, Child.Mom.Name "
"FROM Child"
)
self.assertSoqlEqual(
select(Teacher).join(Teacher.students),
"SELECT Teacher.Id, (SELECT Child.Id, Child.Name FROM Teacher.Students) "
"FROM Teacher"
)
self.assertSoqlEqual(
select(Teacher).join(Teacher.students).join(Teacher.students.mom),
"SELECT Teacher.Id, "
"(SELECT Child.Id, Child.Name, Child.Mom.Age, Child.Mom.Id, Child.Mom.Name FROM Teacher.Students) "
"FROM Teacher"
)
self.assertSoqlEqual(
select(Teacher).join(Teacher.students.mom),
"SELECT Teacher.Id, "
"(SELECT Child.Id, Child.Name, Child.Mom.Age, Child.Mom.Id, Child.Mom.Name FROM Teacher.Students) "
"FROM Teacher"
)
self.assertSoqlEqual(
select(Child).join(Child.mom.mom),
"SELECT Child.Id, Child.Name, Child.Mom.Age, "
"Child.Mom.Id, Child.Mom.Name, Child.Mom.Mom.Id "
"FROM Child"
)
self.assertSoqlEqual(
select(Teacher).join(Teacher.students.mom).join(
Teacher.students.dad),
"SELECT Teacher.Id, "
"(SELECT Child.Id, Child.Name, Child.Dad.Age, Child.Dad.Id, Child.Dad.Name, "
"Child.Mom.Age, Child.Mom.Id, Child.Mom.Name FROM Teacher.Students) "
"FROM Teacher"
)
self.assertSoqlEqual(
select(Child).join(Child.teacher.students.mom),
"SELECT Child.Id, Child.Name, Child.Teacher.Id, "
"(SELECT Child.Id, Child.Name, Child.Mom.Age, Child.Mom.Id, "
"Child.Mom.Name FROM Child.Teacher.Students) "
"FROM Child"
)
def test_filters(self):
self.assertSoqlEqual(
select(Child).where(Child.id == '123'),
"SELECT Child.Id, Child.Name "
"FROM Child "
"WHERE Child.Id = '123'"
)
self.assertSoqlEqual(
select(Child).where(Child.id == '123').where(Child.name == 'Jill'),
"SELECT Child.Id, Child.Name "
"FROM Child "
"WHERE Child.Id = '123' AND Child.Name = 'Jill'"
)
self.assertSoqlEqual(
select(Child).where(Child.name == u'CATMONKÈ-123490'),
u"SELECT Child.Id, Child.Name "
u"FROM Child "
u"WHERE Child.Name = 'CATMONKÈ-123490'"
)
def test_order_by(self):
self.assertSoqlEqual(
select(Parent).order_by(Parent.age),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age).order_by(Parent.id),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age, Parent.Id"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age, direction=desc),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age DESC"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age, direction=desc).order_by(Parent.id, direction=asc),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age DESC, Parent.Id ASC"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age, direction=asc, nulls_position=nulls_first),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age ASC NULLS FIRST"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age, direction=desc, nulls_position=nulls_last),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age DESC NULLS LAST"
)
def test_count(self):
self.assertSoqlEqual(
select(Child).count(),
"SELECT COUNT() "
"FROM Child"
)
def test_offset_and_limit(self):
self.assertSoqlEqual(
select(Child).limit(100),
"SELECT Child.Id, Child.Name "
"FROM Child "
"LIMIT 100"
)
self.assertSoqlEqual(
select(Child).offset(100),
"SELECT Child.Id, Child.Name "
"FROM Child "
"OFFSET 100"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age).offset(100).limit(100),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age "
"LIMIT 100 "
"OFFSET 100"
)
def test_override_columns(self):
self.assertSoqlEqual(
select(Parent).columns(Parent.id),
"SELECT Parent.Id "
"FROM Parent"
)
self.assertSoqlEqual(
select(Parent).columns(Parent.id, Parent.name),
"SELECT Parent.Id, Parent.Name "
"FROM Parent"
)
def test_subquery(self):
self.assertSoqlEqual(
select(Parent).columns(Parent.id).subquery(),
"(SELECT Parent.Id FROM Parent)"
)
subquery = select(Parent).columns(Parent.name).subquery()
self.assertSoqlEqual(
select(Child).where(Child.name.in_(subquery)),
"SELECT Child.Id, Child.Name "
"FROM Child "
"WHERE Child.Name IN (SELECT Parent.Name FROM Parent)"
)
with self.assertRaises(SelectClauseIsntValidSubquery):
select(Parent).offset(100).subquery()
|
the-stack_0_10305 | import numpy as np
from torch.nn import functional as F
from ConSSL.utils import _PIL_AVAILABLE
from ConSSL.utils.warnings import warn_missing_pkg
if _PIL_AVAILABLE:
from PIL import Image
else: # pragma: no cover
warn_missing_pkg('PIL', pypi_name='Pillow')
class RandomTranslateWithReflect:
"""
Translate image randomly
Translate vertically and horizontally by n pixels where
n is integer drawn uniformly independently for each axis
from [-max_translation, max_translation].
Fill the uncovered blank area with reflect padding.
"""
def __init__(self, max_translation):
if not _PIL_AVAILABLE: # pragma: no cover
raise ModuleNotFoundError("You want to use `Pillow` which is not installed yet.")
self.max_translation = max_translation
def __call__(self, old_image):
xtranslation, ytranslation = np.random.randint(-self.max_translation, self.max_translation + 1, size=2)
xpad, ypad = abs(xtranslation), abs(ytranslation)
xsize, ysize = old_image.size
flipped_lr = old_image.transpose(Image.FLIP_LEFT_RIGHT)
flipped_tb = old_image.transpose(Image.FLIP_TOP_BOTTOM)
flipped_both = old_image.transpose(Image.ROTATE_180)
new_image = Image.new("RGB", (xsize + 2 * xpad, ysize + 2 * ypad))
new_image.paste(old_image, (xpad, ypad))
new_image.paste(flipped_lr, (xpad + xsize - 1, ypad))
new_image.paste(flipped_lr, (xpad - xsize + 1, ypad))
new_image.paste(flipped_tb, (xpad, ypad + ysize - 1))
new_image.paste(flipped_tb, (xpad, ypad - ysize + 1))
new_image.paste(flipped_both, (xpad - xsize + 1, ypad - ysize + 1))
new_image.paste(flipped_both, (xpad + xsize - 1, ypad - ysize + 1))
new_image.paste(flipped_both, (xpad - xsize + 1, ypad + ysize - 1))
new_image.paste(flipped_both, (xpad + xsize - 1, ypad + ysize - 1))
new_image = new_image.crop(
(xpad - xtranslation, ypad - ytranslation, xpad + xsize - xtranslation, ypad + ysize - ytranslation)
)
return new_image
class Patchify(object):
def __init__(self, patch_size, overlap_size):
self.patch_size = patch_size
self.overlap_size = self.patch_size - overlap_size
def __call__(self, x):
x = x.unsqueeze(0)
b, c, h, w = x.size()
# patch up the images
# (b, c, h, w) -> (b, c*patch_size, L)
x = F.unfold(x, kernel_size=self.patch_size, stride=self.overlap_size)
# (b, c*patch_size, L) -> (b, nb_patches, width, height)
x = x.transpose(2, 1).contiguous().view(b, -1, self.patch_size, self.patch_size)
# reshape to have (b x patches, c, h, w)
x = x.view(-1, c, self.patch_size, self.patch_size)
x = x.squeeze(0)
return x
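# Illustrative shape check (editor's addition). The 32x32 image size and the
# patch_size/overlap_size values below are arbitrary example choices.
if __name__ == "__main__":
    import torch
    img = Image.new("RGB", (32, 32))
    shifted = RandomTranslateWithReflect(max_translation=4)(img)
    assert shifted.size == (32, 32)  # same size; content shifted with reflect padding
    x = torch.rand(3, 32, 32)
    patches = Patchify(patch_size=8, overlap_size=4)(x)
    # effective stride = patch_size - overlap_size = 4, so 7 patches per axis
    assert patches.shape == (49, 3, 8, 8)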
|
the-stack_0_10306 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Rocthrust(CMakePackage):
"""Thrust is a parallel algorithm library. This library has been ported to
HIP/ROCm platform, which uses the rocPRIM library. The HIP ported
library works on HIP/ROCm platforms"""
homepage = "https://github.com/ROCmSoftwarePlatform/rocThrust"
url = "https://github.com/ROCmSoftwarePlatform/rocThrust/archive/rocm-3.10.0.tar.gz"
maintainers = ['srekolam', 'arjun-raj-kuppala']
version('3.10.0', sha256='31bea6cd19a0ffa15e4ab50ecde2402ea5aaa182149cfab98242357e41f1805b')
version('3.9.0', sha256='65f5e74d72c5aaee90459468d693b212af7d56e31098ee8237b18d1b4d620eb0')
version('3.8.0', sha256='39350aeb8bfbcd09e387717b2a05c7e3a19e0fa85ff4284b967bb8fae12f9013')
version('3.7.0', sha256='4cb923dde5eec150a566cb10d23ee5c7ce3aa892c4dea94886a89d95b90f3bdd')
version('3.5.0', sha256='0d1bac1129d17bb1259fd06f5c9cb4c1620d1790b5c295b866fb3442d18923cb')
variant('build_type', default='Release', values=("Release", "Debug"),
description='CMake build type')
depends_on('cmake@3:', type='build')
depends_on('numactl', when='@3.7.0:')
for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0']:
depends_on('hip@' + ver, type='build', when='@' + ver)
depends_on('rocm-device-libs@' + ver, type='build', when='@' + ver)
depends_on('comgr@' + ver, type='build', when='@' + ver)
depends_on('hsa-rocr-dev@' + ver, type='build', when='@' + ver)
depends_on('rocprim@' + ver, type='build', when='@' + ver)
def setup_build_environment(self, env):
env.set('CXX', self.spec['hip'].hipcc)
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_MODULE_PATH={0}/cmake'.format(spec['hip'].prefix)
]
return args
|
the-stack_0_10307 | import numpy as np
import pandas as pd
import os
def read_data():
# set the path of the raw data
raw_data_path = os.path.join(os.path.pardir,'data','raw')
train_file_path = os.path.join(raw_data_path, 'train.csv')
test_file_path = os.path.join(raw_data_path, 'test.csv')
# read the data with all default parameters
train_df = pd.read_csv(train_file_path, index_col='PassengerId')
test_df = pd.read_csv(test_file_path, index_col='PassengerId')
    # We don't have the Survived field in Test, so let's fill it with a default so we can
    # concat test and train together
test_df['Survived'] = -888
df = pd.concat((train_df, test_df), axis=0)
return df
def process_data(df):
    # using the method chaining concept - this is different from the code we
    # wrote in each cell: we can chain methods, and the next function uses the
    # output of the previous (see the small illustrative sketch after this
    # function)
return (df
# create title attribute - then add this
.assign(Title = lambda x: x.Name.map(get_title))
# working missing values - start with this
.pipe(fill_missing_values) #This lets us apply a function into the data frame
# create fare bin feature
.assign(Fare_Bin = lambda x: pd.qcut(x.Fare, 4, labels=['very_low','low','high','very_high']))
# create age state
.assign(AgeState = lambda x : np.where(x.Age >= 18, 'Adult','Child'))
.assign(FamilySize = lambda x : x.Parch + x.SibSp + 1)
.assign(IsMother = lambda x : np.where(((x.Sex == 'female') & (x.Parch > 0) & (x.Age > 18) & (x.Title != 'Miss')), 1, 0))
# create deck feature
.assign(Cabin = lambda x: np.where(x.Cabin == 'T', np.nan, x.Cabin))
.assign(Deck = lambda x : x.Cabin.map(get_deck))
# feature encoding
.assign(IsMale = lambda x : np.where(x.Sex == 'male', 1,0))
.pipe(pd.get_dummies, columns=['Deck', 'Pclass','Title', 'Fare_Bin', 'Embarked','AgeState'])
# add code to drop unnecessary columns
.drop(['Cabin','Name','Ticket','Parch','SibSp','Sex'], axis=1) #no need for inplace option here, since we are using chaining
# reorder columns
.pipe(reorder_columns)
)
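# Small illustrative sketch (editor's addition) of the chaining style used in
# process_data above, applied to a toy frame; the column names here are made up.
def _chaining_example():
    toy = pd.DataFrame({'Age': [10, 40], 'Sex': ['male', 'female']})
    return (toy
            .assign(AgeState=lambda x: np.where(x.Age >= 18, 'Adult', 'Child'))
            .pipe(pd.get_dummies, columns=['Sex']))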
def get_title(name):
title_group = {'mr' : 'Mr',
'mrs' : 'Mrs',
'miss' : 'Miss',
'master' : 'Master',
'don' : 'Sir',
'rev' : 'Sir',
'dr' : 'Officer',
'mme' : 'Mrs',
'ms' : 'Mrs',
'major' : 'Officer',
'lady' : 'Lady',
'sir' : 'Sir',
'mlle' : 'Miss',
'col' : 'Officer',
'capt' : 'Officer',
'the countess' : 'Lady',
'jonkheer' : 'Sir',
'dona' : 'Lady'
}
first_name_with_title = name.split(',')[1]
title = first_name_with_title.split('.')[0]
title = title.strip().lower()
return title_group[title]
def get_deck(cabin):
return np.where(pd.notnull(cabin),str(cabin)[0].upper(),'Z')
def fill_missing_values(df):
# embarked
df.Embarked.fillna('C', inplace=True)
# fare
median_fare = df[(df.Pclass == 3) & (df.Embarked == 'S')]['Fare'].median()
df.Fare.fillna(median_fare, inplace=True)
# age
title_age_median = df.groupby('Title').Age.transform('median')
df.Age.fillna(title_age_median , inplace=True)
return df
def reorder_columns(df):
columns = [column for column in df.columns if column != 'Survived']
columns = ['Survived'] + columns
df = df[columns]
return df
def write_data(df):
processed_data_path = os.path.join(os.path.pardir,'data','processed')
write_train_path = os.path.join(processed_data_path, 'train.csv')
write_test_path = os.path.join(processed_data_path, 'test.csv')
# train data
df[df.Survived != -888].to_csv(write_train_path)
# test data
columns = [column for column in df.columns if column != 'Survived']
df[df.Survived == -888][columns].to_csv(write_test_path)
if __name__ == '__main__':
df = read_data()
df = process_data(df)
write_data(df) |
the-stack_0_10308 | import functools
import operator
from collections import namedtuple
from json import dumps, loads
from galaxy_test.base.populators import skip_without_tool, summarize_instance_history_on_error
from .test_workflows import BaseWorkflowsApiTestCase
class WorkflowExtractionApiTestCase(BaseWorkflowsApiTestCase):
history_id: str
def setUp(self):
super().setUp()
self.history_id = self.dataset_populator.new_history()
@skip_without_tool("cat1")
@summarize_instance_history_on_error
def test_extract_from_history(self):
# Run the simple test workflow and extract it back out from history
cat1_job_id = self.__setup_and_run_cat1_workflow(history_id=self.history_id)
contents = self._history_contents()
input_hids = [c["hid"] for c in contents[0:2]]
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="extract_from_history_basic",
dataset_ids=input_hids,
job_ids=[cat1_job_id],
)
self.assertEqual(downloaded_workflow["name"], "test import from history")
self.__assert_looks_like_cat1_example_workflow(downloaded_workflow)
@summarize_instance_history_on_error
def test_extract_with_copied_inputs(self):
old_history_id = self.dataset_populator.new_history()
# Run the simple test workflow and extract it back out from history
self.__setup_and_run_cat1_workflow(history_id=old_history_id)
# Bug cannot mess up hids or these don't extract correctly. See Trello card here:
# https://trello.com/c/mKzLbM2P
# # create dummy dataset to complicate hid mapping
# self.dataset_populator.new_dataset( history_id, content="dummydataset" )
# offset = 1
offset = 0
old_contents = self._history_contents(old_history_id)
for old_dataset in old_contents:
self.__copy_content_to_history(self.history_id, old_dataset)
new_contents = self._history_contents()
input_hids = [c["hid"] for c in new_contents[(offset + 0):(offset + 2)]]
cat1_job_id = self.__job_id(self.history_id, new_contents[(offset + 2)]["id"])
def reimport_jobs_ids(new_history_id):
return [j["id"] for j in self.dataset_populator.history_jobs(new_history_id) if j["tool_id"] == "cat1"]
downloaded_workflow = self._extract_and_download_workflow(
dataset_ids=input_hids,
job_ids=[cat1_job_id],
)
self.__assert_looks_like_cat1_example_workflow(downloaded_workflow)
@summarize_instance_history_on_error
def test_extract_with_copied_inputs_reimported(self):
old_history_id = self.dataset_populator.new_history()
# Run the simple test workflow and extract it back out from history
self.__setup_and_run_cat1_workflow(history_id=old_history_id)
offset = 0
old_contents = self._history_contents(old_history_id)
for old_dataset in old_contents:
self.__copy_content_to_history(self.history_id, old_dataset)
new_contents = self._history_contents()
input_hids = [c["hid"] for c in new_contents[(offset + 0):(offset + 2)]]
def reimport_jobs_ids(new_history_id):
return [j["id"] for j in self.dataset_populator.history_jobs(new_history_id) if j["tool_id"] == "cat1"]
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="test_extract_with_copied_inputs",
reimport_jobs_ids=reimport_jobs_ids,
dataset_ids=input_hids,
)
self.__assert_looks_like_cat1_example_workflow(downloaded_workflow)
@skip_without_tool("random_lines1")
@summarize_instance_history_on_error
def test_extract_mapping_workflow_from_history(self):
hdca, job_id1, job_id2 = self.__run_random_lines_mapped_over_pair(self.history_id)
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="extract_from_history_with_mapping",
dataset_collection_ids=[hdca["hid"]],
job_ids=[job_id1, job_id2],
)
self.__assert_looks_like_randomlines_mapping_workflow(downloaded_workflow)
def test_extract_copied_mapping_from_history(self):
old_history_id = self.dataset_populator.new_history()
hdca, job_id1, job_id2 = self.__run_random_lines_mapped_over_pair(old_history_id)
old_contents = self._history_contents(old_history_id)
for old_content in old_contents:
self.__copy_content_to_history(self.history_id, old_content)
        # API test is somewhat contrived since there is no good way
        # to retrieve job_id1, job_id2 like this for copied dataset
        # collections, I don't think.
downloaded_workflow = self._extract_and_download_workflow(
dataset_collection_ids=[hdca["hid"]],
job_ids=[job_id1, job_id2],
)
self.__assert_looks_like_randomlines_mapping_workflow(downloaded_workflow)
def test_extract_copied_mapping_from_history_reimported(self):
import unittest
raise unittest.SkipTest("Mapping connection for copied collections not yet implemented in history import/export")
old_history_id = self.dataset_populator.new_history()
hdca, job_id1, job_id2 = self.__run_random_lines_mapped_over_singleton(old_history_id)
old_contents = self._history_contents(old_history_id)
for old_content in old_contents:
self.__copy_content_to_history(self.history_id, old_content)
def reimport_jobs_ids(new_history_id):
rval = [j["id"] for j in self.dataset_populator.history_jobs(new_history_id) if j["tool_id"] == "random_lines1"]
assert len(rval) == 2
print(rval)
return rval
        # API test is somewhat contrived since there is no good way
        # to retrieve job_id1, job_id2 like this for copied dataset
        # collections, I don't think.
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="test_extract_from_history_with_mapped_collection_reimport",
reimport_jobs_ids=reimport_jobs_ids,
reimport_wait_on_history_length=9, # see comments in _extract about eliminating this magic constant.
dataset_collection_ids=[hdca["hid"]],
)
self.__assert_looks_like_randomlines_mapping_workflow(downloaded_workflow)
@skip_without_tool("random_lines1")
@skip_without_tool("multi_data_param")
def test_extract_reduction_from_history(self):
hdca = self.dataset_collection_populator.create_pair_in_history(self.history_id, contents=["1 2 3\n4 5 6", "7 8 9\n10 11 10"]).json()
hdca_id = hdca["id"]
inputs1 = {
"input": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]},
"num_lines": 2
}
implicit_hdca1, job_id1 = self._run_tool_get_collection_and_job_id(self.history_id, "random_lines1", inputs1)
inputs2 = {
"f1": {"src": "hdca", "id": implicit_hdca1["id"]},
"f2": {"src": "hdca", "id": implicit_hdca1["id"]},
}
reduction_run_output = self.dataset_populator.run_tool(
tool_id="multi_data_param",
inputs=inputs2,
history_id=self.history_id,
)
job_id2 = reduction_run_output["jobs"][0]["id"]
self.dataset_populator.wait_for_job(job_id2, assert_ok=True)
self.dataset_populator.wait_for_history(self.history_id, assert_ok=True)
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="extract_from_history_with_reduction",
dataset_collection_ids=[hdca["hid"]],
job_ids=[job_id1, job_id2],
)
assert len(downloaded_workflow["steps"]) == 3
collect_step_idx = self._assert_first_step_is_paired_input(downloaded_workflow)
tool_steps = self._get_steps_of_type(downloaded_workflow, "tool", expected_len=2)
random_lines_map_step = tool_steps[0]
reduction_step = tool_steps[1]
assert "tool_id" in random_lines_map_step, random_lines_map_step
assert random_lines_map_step["tool_id"] == "random_lines1", random_lines_map_step
assert "input_connections" in random_lines_map_step, random_lines_map_step
random_lines_input_connections = random_lines_map_step["input_connections"]
assert "input" in random_lines_input_connections, random_lines_map_step
random_lines_input = random_lines_input_connections["input"]
assert random_lines_input["id"] == collect_step_idx
reduction_step_input = reduction_step["input_connections"]["f1"]
assert reduction_step_input["id"] == random_lines_map_step["id"]
@skip_without_tool("collection_paired_test")
def test_extract_workflows_with_dataset_collections(self):
jobs_summary = self._run_workflow("""
class: GalaxyWorkflow
steps:
- label: text_input1
type: input_collection
- tool_id: collection_paired_test
state:
f1:
$link: text_input1
test_data:
text_input1:
collection_type: paired
""")
job_id = self._job_id_for_tool(jobs_summary.jobs, "collection_paired_test")
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="extract_from_history_with_basic_collections",
dataset_collection_ids=["1"],
job_ids=[job_id],
)
self.__check_workflow(
downloaded_workflow,
step_count=2,
verify_connected=True,
data_input_count=0,
data_collection_input_count=1,
tool_ids=["collection_paired_test"]
)
collection_step = self._get_steps_of_type(downloaded_workflow, "data_collection_input", expected_len=1)[0]
collection_step_state = loads(collection_step["tool_state"])
self.assertEqual(collection_step_state["collection_type"], "paired")
@skip_without_tool("cat_collection")
def test_subcollection_mapping(self):
jobs_summary = self._run_workflow("""
class: GalaxyWorkflow
steps:
- label: text_input1
type: input_collection
- label: noop
tool_id: cat1
state:
input1:
$link: text_input1
- tool_id: cat_collection
state:
input1:
$link: noop/out_file1
test_data:
text_input1:
collection_type: "list:paired"
""")
job1_id = self._job_id_for_tool(jobs_summary.jobs, "cat1")
job2_id = self._job_id_for_tool(jobs_summary.jobs, "cat_collection")
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="test_extract_workflows_with_subcollection_mapping",
dataset_collection_ids=["1"],
job_ids=[job1_id, job2_id],
)
self.__check_workflow(
downloaded_workflow,
step_count=3,
verify_connected=True,
data_input_count=0,
data_collection_input_count=1,
tool_ids=["cat_collection", "cat1"],
)
collection_step = self._get_steps_of_type(downloaded_workflow, "data_collection_input", expected_len=1)[0]
collection_step_state = loads(collection_step["tool_state"])
self.assertEqual(collection_step_state["collection_type"], "list:paired")
@skip_without_tool("cat_list")
@skip_without_tool("collection_creates_dynamic_nested")
def test_subcollection_reduction(self):
jobs_summary = self._run_workflow("""
class: GalaxyWorkflow
steps:
creates_nested_list:
tool_id: collection_creates_dynamic_nested
reduce_nested_list:
tool_id: cat_list
in:
input1: creates_nested_list/list_output
""")
job1_id = self._job_id_for_tool(jobs_summary.jobs, "cat_list")
job2_id = self._job_id_for_tool(jobs_summary.jobs, "collection_creates_dynamic_nested")
self._extract_and_download_workflow(
reimport_as="test_extract_workflows_with_subcollection_reduction",
dataset_collection_ids=["1"],
job_ids=[job1_id, job2_id],
)
# TODO: refactor workflow extraction to not rely on HID, so we can actually properly connect
# this workflow
@skip_without_tool("collection_split_on_column")
def test_extract_workflow_with_output_collections(self):
jobs_summary = self._run_workflow("""
class: GalaxyWorkflow
steps:
- label: text_input1
type: input
- label: text_input2
type: input
- label: cat_inputs
tool_id: cat1
state:
input1:
$link: text_input1
queries:
- input2:
$link: text_input2
- label: split_up
tool_id: collection_split_on_column
state:
input1:
$link: cat_inputs/out_file1
- tool_id: cat_list
state:
input1:
$link: split_up/split_output
test_data:
text_input1: "samp1\t10.0\nsamp2\t20.0\n"
text_input2: "samp1\t30.0\nsamp2\t40.0\n"
""")
tool_ids = ["cat1", "collection_split_on_column", "cat_list"]
job_ids = [functools.partial(self._job_id_for_tool, jobs_summary.jobs)(_) for _ in tool_ids]
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="test_extract_workflows_with_output_collections",
dataset_ids=["1", "2"],
job_ids=job_ids,
)
self.__check_workflow(
downloaded_workflow,
step_count=5,
verify_connected=True,
data_input_count=2,
data_collection_input_count=0,
tool_ids=tool_ids,
)
@skip_without_tool("collection_creates_pair")
@summarize_instance_history_on_error
def test_extract_with_mapped_output_collections(self):
jobs_summary = self._run_workflow("""
class: GalaxyWorkflow
steps:
- label: text_input1
type: input_collection
- label: cat_inputs
tool_id: cat1
state:
input1:
$link: text_input1
- label: pair_off
tool_id: collection_creates_pair
state:
input1:
$link: cat_inputs/out_file1
- label: cat_pairs
tool_id: cat_collection
state:
input1:
$link: pair_off/paired_output
- tool_id: cat_list
state:
input1:
$link: cat_pairs/out_file1
test_data:
text_input1:
collection_type: list
elements:
- identifier: samp1
content: "samp1\t10.0\nsamp2\t20.0\n"
- identifier: samp2
content: "samp1\t30.0\nsamp2\t40.0\n"
""")
tool_ids = ["cat1", "collection_creates_pair", "cat_collection", "cat_list"]
job_ids = [functools.partial(self._job_id_for_tool, jobs_summary.jobs)(_) for _ in tool_ids]
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="test_extract_workflows_with_mapped_output_collections",
dataset_collection_ids=["1"],
job_ids=job_ids,
)
self.__check_workflow(
downloaded_workflow,
step_count=5,
verify_connected=True,
data_input_count=0,
data_collection_input_count=1,
tool_ids=tool_ids,
)
def _job_id_for_tool(self, jobs, tool_id):
return self._job_for_tool(jobs, tool_id)["id"]
def _job_for_tool(self, jobs, tool_id):
tool_jobs = [j for j in jobs if j["tool_id"] == tool_id]
if not tool_jobs:
raise ValueError(f"Failed to find job for tool {tool_id}")
# if len( tool_jobs ) > 1:
# assert False, "Found multiple jobs for tool %s" % tool_id
return tool_jobs[-1]
def __run_random_lines_mapped_over_pair(self, history_id):
hdca = self.dataset_collection_populator.create_pair_in_history(history_id, contents=["1 2 3\n4 5 6", "7 8 9\n10 11 10"]).json()
hdca_id = hdca["id"]
inputs1 = {
"input": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]},
"num_lines": 2
}
implicit_hdca1, job_id1 = self._run_tool_get_collection_and_job_id(history_id, "random_lines1", inputs1)
inputs2 = {
"input": {"batch": True, "values": [{"src": "hdca", "id": implicit_hdca1["id"]}]},
"num_lines": 1
}
_, job_id2 = self._run_tool_get_collection_and_job_id(history_id, "random_lines1", inputs2)
return hdca, job_id1, job_id2
def __run_random_lines_mapped_over_singleton(self, history_id):
hdca = self.dataset_collection_populator.create_list_in_history(history_id, contents=["1 2 3\n4 5 6"]).json()
hdca_id = hdca["id"]
inputs1 = {
"input": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]},
"num_lines": 2
}
implicit_hdca1, job_id1 = self._run_tool_get_collection_and_job_id(history_id, "random_lines1", inputs1)
inputs2 = {
"input": {"batch": True, "values": [{"src": "hdca", "id": implicit_hdca1["id"]}]},
"num_lines": 1
}
_, job_id2 = self._run_tool_get_collection_and_job_id(history_id, "random_lines1", inputs2)
return hdca, job_id1, job_id2
def __assert_looks_like_randomlines_mapping_workflow(self, downloaded_workflow):
# Assert workflow is input connected to a tool step with one output
# connected to another tool step.
assert len(downloaded_workflow["steps"]) == 3
collect_step_idx = self._assert_first_step_is_paired_input(downloaded_workflow)
tool_steps = self._get_steps_of_type(downloaded_workflow, "tool", expected_len=2)
tool_step_idxs = []
tool_input_step_idxs = []
for tool_step in tool_steps:
self._assert_has_key(tool_step["input_connections"], "input")
input_step_idx = tool_step["input_connections"]["input"]["id"]
tool_step_idxs.append(tool_step["id"])
tool_input_step_idxs.append(input_step_idx)
assert collect_step_idx not in tool_step_idxs
assert tool_input_step_idxs[0] == collect_step_idx
assert tool_input_step_idxs[1] == tool_step_idxs[0]
def __assert_looks_like_cat1_example_workflow(self, downloaded_workflow):
assert len(downloaded_workflow["steps"]) == 3
input_steps = self._get_steps_of_type(downloaded_workflow, "data_input", expected_len=2)
tool_step = self._get_steps_of_type(downloaded_workflow, "tool", expected_len=1)[0]
input1 = tool_step["input_connections"]["input1"]
input2 = tool_step["input_connections"]["queries_0|input2"]
self.assertEqual(input_steps[0]["id"], input1["id"])
self.assertEqual(input_steps[1]["id"], input2["id"])
def _history_contents(self, history_id=None):
if history_id is None:
history_id = self.history_id
return self._get(f"histories/{history_id}/contents").json()
def __copy_content_to_history(self, history_id, content):
if content["history_content_type"] == "dataset":
payload = dict(
source="hda",
content=content["id"]
)
response = self._post(f"histories/{history_id}/contents/datasets", payload, json=True)
else:
payload = dict(
source="hdca",
content=content["id"]
)
response = self._post(f"histories/{history_id}/contents/dataset_collections", payload, json=True)
self._assert_status_code_is(response, 200)
return response.json()
def __setup_and_run_cat1_workflow(self, history_id):
workflow = self.workflow_populator.load_workflow(name="test_for_extract")
workflow_request, history_id, workflow_id = self._setup_workflow_run(workflow, history_id=history_id)
run_workflow_response = self._post(f"workflows/{workflow_id}/invocations", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
return self.__cat_job_id(history_id)
def _assert_first_step_is_paired_input(self, downloaded_workflow):
collection_steps = self._get_steps_of_type(downloaded_workflow, "data_collection_input", expected_len=1)
collection_step = collection_steps[0]
collection_step_state = loads(collection_step["tool_state"])
self.assertEqual(collection_step_state["collection_type"], "paired")
collect_step_idx = collection_step["id"]
return collect_step_idx
def _extract_and_download_workflow(self, **extract_payload):
reimport_as = extract_payload.get("reimport_as")
if reimport_as:
history_name = reimport_as
history_id = self.history_id
self.dataset_populator.wait_for_history(history_id)
self.dataset_populator.rename_history(history_id, history_name)
history_length = extract_payload.get("reimport_wait_on_history_length")
if history_length is None:
# sometimes this won't be the same (i.e. datasets copied from outside the history
# that need to be included in target history for collections), but we can provide
# a reasonable default for fully in-history imports.
history_length = self.dataset_populator.history_length(history_id)
new_history_id = self.dataset_populator.reimport_history(
history_id, history_name, wait_on_history_length=history_length, export_kwds={}, api_key=self.galaxy_interactor.api_key
)
            # Wait a little more for those jobs. TODO: fix this to wait for the history
            # import flag to become false, or for a specific number of jobs.
import time
time.sleep(1)
if "reimport_jobs_ids" in extract_payload:
new_history_job_ids = extract_payload["reimport_jobs_ids"](new_history_id)
extract_payload["job_ids"] = new_history_job_ids
else:
# Assume no copying or anything so just straight map job ids by index.
# Jobs are created after datasets, need to also wait on those...
history_jobs = [j for j in self.dataset_populator.history_jobs(history_id) if j["tool_id"] != "__EXPORT_HISTORY__"]
new_history_jobs = [j for j in self.dataset_populator.history_jobs(new_history_id) if j["tool_id"] != "__EXPORT_HISTORY__"]
history_job_ids = [j["id"] for j in history_jobs]
new_history_job_ids = [j["id"] for j in new_history_jobs]
assert len(history_job_ids) == len(new_history_job_ids)
if "job_ids" in extract_payload:
job_ids = extract_payload["job_ids"]
new_job_ids = []
for job_id in job_ids:
new_job_ids.append(new_history_job_ids[history_job_ids.index(job_id)])
extract_payload["job_ids"] = new_job_ids
self.history_id = new_history_id
if "from_history_id" not in extract_payload:
extract_payload["from_history_id"] = self.history_id
if "workflow_name" not in extract_payload:
extract_payload["workflow_name"] = "test import from history"
for key in "job_ids", "dataset_ids", "dataset_collection_ids":
if key in extract_payload:
value = extract_payload[key]
if isinstance(value, list):
extract_payload[key] = dumps(value)
create_workflow_response = self._post("workflows", data=extract_payload)
self._assert_status_code_is(create_workflow_response, 200)
new_workflow_id = create_workflow_response.json()["id"]
download_response = self._get(f"workflows/{new_workflow_id}/download")
self._assert_status_code_is(download_response, 200)
downloaded_workflow = download_response.json()
return downloaded_workflow
def _get_steps_of_type(self, downloaded_workflow, type, expected_len=None):
steps = [s for s in downloaded_workflow["steps"].values() if s["type"] == type]
if expected_len is not None:
n = len(steps)
assert n == expected_len, "Expected %d steps of type %s, found %d" % (expected_len, type, n)
return sorted(steps, key=operator.itemgetter("id"))
def __job_id(self, history_id, dataset_id):
url = f"histories/{history_id}/contents/{dataset_id}/provenance"
prov_response = self._get(url, data=dict(follow=False))
self._assert_status_code_is(prov_response, 200)
return prov_response.json()["job_id"]
def __cat_job_id(self, history_id):
data = dict(history_id=history_id, tool_id="cat1")
jobs_response = self._get("jobs", data=data)
self._assert_status_code_is(jobs_response, 200)
cat1_job_id = jobs_response.json()[0]["id"]
return cat1_job_id
def _run_tool_get_collection_and_job_id(self, history_id, tool_id, inputs):
run_output1 = self.dataset_populator.run_tool(
tool_id=tool_id,
inputs=inputs,
history_id=history_id,
)
implicit_hdca = run_output1["implicit_collections"][0]
job_id = run_output1["jobs"][0]["id"]
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
return implicit_hdca, job_id
def __check_workflow(
self,
workflow,
step_count=None,
verify_connected=False,
data_input_count=None,
data_collection_input_count=None,
tool_ids=None,
):
steps = workflow['steps']
if step_count is not None:
assert len(steps) == step_count
if verify_connected:
self.__assert_connected(workflow, steps)
if tool_ids is not None:
tool_steps = self._get_steps_of_type(workflow, "tool")
found_steps = set(map(operator.itemgetter("tool_id"), tool_steps))
expected_steps = set(tool_ids)
assert found_steps == expected_steps
if data_input_count is not None:
self._get_steps_of_type(workflow, "data_input", expected_len=data_input_count)
if data_collection_input_count is not None:
self._get_steps_of_type(workflow, "data_collection_input", expected_len=data_collection_input_count)
def __assert_connected(self, workflow, steps):
disconnected_inputs = []
for value in steps.values():
if value['type'] == "tool":
input_connections = value["input_connections"]
if not input_connections:
disconnected_inputs.append(value)
if disconnected_inputs:
template = "%d steps disconnected in extracted workflow - disconnectect steps are %s - workflow is %s"
message = template % (len(disconnected_inputs), disconnected_inputs, workflow)
raise AssertionError(message)
RunJobsSummary = namedtuple('RunJobsSummary', ['history_id', 'workflow_id', 'inputs', 'jobs'])
|
the-stack_0_10309 |
# coding: utf-8
# # Chart presentation (8) - Changing hovertext (1)
# In the last lessons we learnt how to use Pandas' <code>df.apply()</code> in conjunction with a user-defined or a <code>lambda</code> function to create a column in our DataFrame to store the value for the hovertext.
#
# In this lesson we'll apply what we've learnt to the stacked quantity CO2 emissions area plot, and in the next we'll update the stacked proportional CO2 emissions area plot.
#
# We will get the data and rewrite the code which creates the chart, rather than reloading the saved charts, because we need to manipulate the DataFrames from which they were created in order to build the hovertext field.
# ## Module Imports
# In[1]:
#plotly.offline doesn't push your charts to the cloud
import plotly.offline as pyo
#allows us to create the Data and Figure objects
from plotly.graph_objs import *
#plotly.plotly pushes your charts to the cloud
import plotly.plotly as py
#pandas is a data analysis library
import pandas as pd
from pandas import DataFrame
# In[2]:
#lets us see the charts in an iPython Notebook
pyo.offline.init_notebook_mode() # run at the start of every ipython notebook
# ### Stacked quantity area plot
#
# Let's get the emissions data again:
# In[3]:
emissions = pd.read_csv("http://richard-muir.com/data/public/csv/TotalCo2EmissionsByCountry.csv", index_col=0)
emissions.head()
# ### Writing a function
#
# Seeing as we have to rewrite the code for this chart, let's try to do it as programmatically as we can. In lesson 13 of the Lineplot section we used a very long-winded way of making this chart, however in the subsequent lessons we found that we could reduce the amount of code by using the <code>df.cumsum()</code> method. We then further generalised the code by writing a function to create a stacked proportional area plot; we'll use the ideas from that function as a base to write one for a stacked quantity area plot.
#
# If you'd like a challenge, go ahead and write a function which makes a stacked quantity area plot (you can base this code on the stacked proportional area), alternatively you can code along with me!
#
# This function will have six arguments (the same five as for creating the stacked proportional area plot), plus one more which will define some of the text that goes in the hovertext field. As before, I'll write the explanation here and only include it in the finished function to save on space. We'll also test the function as we go.
# In[4]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
"""
A function which manipulates the data into the correct format to produce a stacked quantity area plot with Plotly.
    Takes six arguments:
df - a pandas DataFrame
time - the time element of the data, must be a column in the DataFrame
cols - the name of the columns in the DataFrame which you want to include in the area plot
hover - the text common to every hoverlabel
title - the title of the chart
yaxisTitle - the yaxis title of the chart (the xaxis title comes from the time variable)
"""
# We need to reduce the input DataFrame down to only the columns which we need. You can also reuse this bit of code from the stacked proportional area function:
# In[5]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
stackedAreaDF = df.loc[:, ([time] + cols)]
stackedAreaDF.fillna(0, inplace=True)
return stackedAreaDF
test = createStackedQuantArea(emissions, 'Year', ['United Arab Emirates | ARE','United Kingdom | GBR',
'United States | USA','China | CHN', 'India | IND'], 'Total C02 Emissions: ',
"Quantity of Co2 Emissions, 1960-2011", 'Quantity of Co2 Emissions')
test.head()
# We don't need to create a 'Total' column because we're not calculating proportions, but we do need to calculate the cumulative sum of only the country columns:
# In[6]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
stackedAreaDF = df.loc[:, ([time] + cols)]
stackedAreaDF.fillna(0, inplace=True)
cumulative = stackedAreaDF[cols].cumsum(axis = 1)
return cumulative
test = createStackedQuantArea(emissions, 'Year', ['United Arab Emirates | ARE','United Kingdom | GBR',
'United States | USA','China | CHN', 'India | IND'], 'Total C02 Emissions: ',
"Quantity of Co2 Emissions, 1960-2011", 'Quantity of Co2 Emissions')
test.head()
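# A quick aside on what cumsum(axis = 1) is doing here: it accumulates across the columns of each row, which is exactly what stacks one country's area on top of the previous one. The tiny DataFrame below is purely illustrative - the numbers are made up.
# In[ ]:
import pandas as pd
pd.DataFrame({'A': [1, 2], 'B': [3, 4]}).cumsum(axis = 1)
# Row 0 becomes A = 1, B = 4; row 1 becomes A = 2, B = 6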
# In order to create the hovertext column, we need the original values for the emissions. I'm going to merge the two DataFrames by their index. Because they both have the same number of rows, this is not a problem - each row in one DataFrame will map correctly to its counterpart in the other.
#
# I also need to create a suffix for the column names for each DataFrame - because both have the same names, we need to know how to refer to the correct column:
#
# In[7]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
stackedAreaDF = df.loc[:, ([time] + cols)]
stackedAreaDF.fillna(0, inplace=True)
cumulative = stackedAreaDF[cols].cumsum(axis = 1)
cumulativeAndOrig = cumulative.merge(stackedAreaDF,
left_index = True,
right_index = True,
suffixes = ('_c','_o'))
return cumulativeAndOrig
test = createStackedQuantArea(emissions, 'Year', ['United Arab Emirates | ARE','United Kingdom | GBR',
'United States | USA','China | CHN', 'India | IND'], 'Total C02 Emissions: ',
"Quantity of Co2 Emissions, 1960-2011", 'Quantity of Co2 Emissions')
test.head()
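# To make the suffix behaviour concrete, here is the same kind of index-to-index merge on two tiny DataFrames that share a column name. The '_c'/'_o' suffixes disambiguate the clashing columns just as they do in the function above; the values are made up for illustration.
# In[ ]:
left = pd.DataFrame({'UK': [1, 2]})
right = pd.DataFrame({'UK': [10, 20]})
left.merge(right, left_index = True, right_index = True, suffixes = ('_c','_o'))
# The merged DataFrame has columns 'UK_c' and 'UK_o'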
# Now we can use the Pandas' <code>df.apply(lambda x : x)</code> construction that we learnt in the previous lesson to create a text column for each country. This will also use the <code>hover</code> variable that we pass to the function:
# In[8]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
stackedAreaDF = df.loc[:, ([time] + cols)]
stackedAreaDF.fillna(0, inplace=True)
cumulative = stackedAreaDF[cols].cumsum(axis = 1)
cumulAndOrig = cumulative.merge(stackedAreaDF,
left_index = True,
right_index = True,
suffixes = ('_c','_o'))
for col in cols:
cumulAndOrig[col + '_t'] = "<b>" + str(col)[:-6] + "</b><br>" + str(hover) + cumulAndOrig[col + "_o"].apply(lambda x:
"{:,}Kt".format(int(round(x, 0))))
return cumulAndOrig
test = createStackedQuantArea(emissions, 'Year', ['United Arab Emirates | ARE','United Kingdom | GBR',
'United States | USA','China | CHN', 'India | IND'], 'Total C02 Emissions: ',
"Quantity of Co2 Emissions, 1960-2011", 'Quantity of Co2 Emissions')
test.head(1)
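# As a quick sanity check on the hovertext formatting, here is what the expression inside the lambda produces for a single (made-up) emissions value - thousands separators plus the 'Kt' unit suffix:
# In[ ]:
"{:,}Kt".format(int(round(1234567.89, 0)))
# Returns '1,234,568Kt'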
# Now we can create our traces inside the same loop which creates the text, then create our Data, Layout and Figure objects before plotting the chart! I'm also going to return the Figure object so we can send it to the Plotly cloud:
# In[9]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
"""
A function which manipulates the data into the correct format to produce a stacked quantity area plot with Plotly.
    Takes six arguments:
    df - a pandas DataFrame
    time - the time element of the data, must be a column in the DataFrame
    cols - the name of the columns in the DataFrame which you want to include in the area plot
    hover - the text common to every hoverlabel
    title - the title of the chart
yaxisTitle - the yaxis title of the chart (the xaxis title comes from the time variable)
"""
traces = []
stackedAreaDF = df.loc[:, ([time] + cols)]
stackedAreaDF.fillna(0, inplace=True)
cumulative = stackedAreaDF[cols].cumsum(axis = 1)
cumulAndOrig = cumulative.merge(stackedAreaDF,
left_index = True,
right_index = True,
suffixes = ('_c','_o'))
for col in cols:
cumulAndOrig[col + '_t'] = "<b>" + str(col)[:-6] + "</b><br>" + str(hover) + cumulAndOrig[col + "_o"].apply(lambda x:
"{:,}Kt".format(int(round(x, 0))))
traces.append({'type' : 'scatter',
'x' : cumulAndOrig[time],
'y' : cumulAndOrig[col + "_c"],
'text' : cumulAndOrig[col + "_t"],
'hoverinfo' : 'text+x',
'name' : col[:-6],
'mode' : 'lines',
'fill' : 'tonexty'})
data = Data(traces)
layout = {'title' : title,
'xaxis' : {'title' : time},
'yaxis' : {'title' : yaxisTitle,
'ticksuffix' : ' Kt'},
'hovermode' : 'closest'}
fig = Figure(data = data, layout = layout)
pyo.iplot(fig)
return fig
C02Quant = createStackedQuantArea(emissions, 'Year', ['United Arab Emirates | ARE','United Kingdom | GBR',
'United States | USA','China | CHN', 'India | IND'], 'Total C02 Emissions: ',
"Quantity of Co2 Emissions, 1960-2011", 'Quantity of Co2 Emissions')
py.image.save_as(C02Quant, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(03) Chart Presentation 1\Notebooks\images\Chart presentation (8) - Changing hovertext (1)\pyo.iplot-0.png")
# Let's push this chart to the Plotly cloud:
# In[10]:
py.plot(C02Quant, "C02 Emissions for UAE, USA, UK, India & China 1960 - 2011", fileopt = 'overwrite')
py.image.save_as(C02Quant, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(03) Chart Presentation 1\Notebooks\images\Chart presentation (8) - Changing hovertext (1)\py.plot-0.png")
#
# ### What have we learnt this lesson?
# In this lesson we updated some code that we'd previously written in order to set the hovertext and tickformat on the stacked quantity area plot which we previously made.
#
# In the next lesson we'll apply this to the stacked proportional area plot.
# If you have any questions, please ask in the comments section or email <a href="mailto:[email protected]">[email protected]</a>
|
the-stack_0_10310 | #!/usr/bin/python3
import json
import falcon
from lib.const import Version, Message
from lib.utility import SystemUtility, DocumentUtility, CustomJSONEncoder
from lib.resource import BaseJsonApiResource
from lib.database import Session, Server
class ServerInfoApiResource(BaseJsonApiResource):
def on_get(self, req, resp, hostname):
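        # Returns basic inventory details (IP, hostname, role, region, zone) for the
        # server identified by the hostname path parameter.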
resp.status = falcon.HTTP_200
body = SystemUtility.get_response_base_with_body(Version.VERSION_1)
session = Session()
try:
info = session.query(Server).filter(Server.hostname == hostname).first()
self.logger.debug(info)
body['data']['ip'] = info.ip
body['data']['hostname'] = info.hostname
body['data']['key'] = ['category', 'value', 'note']
body['data']['data'] = [
{'category': 'Hostname', 'value': info.hostname, 'note': ''},
{'category': 'IP', 'value': info.ip, 'note': ''},
{'category': 'Role', 'value': info.rolename, 'note': ''},
{'category': 'Region', 'value': info.region, 'note': ''},
{'category': 'Zone', 'value': info.zone, 'note': ''}
]
except Exception as e:
self.logger.error(e)
session.rollback()
resp.status = falcon.HTTP_500
SystemUtility.set_response_metadata(
Version.VERSION_1, body, Message.RESPONSE_NG, Message.RESPONSE_DATABASE_CONNECTION_ERROR)
finally:
session.close()
resp.body = json.dumps(body, cls=CustomJSONEncoder)
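# Illustrative only (not part of this project): one way this resource could be mounted
# on a Falcon application. The route template is an assumption - the real URL scheme
# lives in the project's router/bootstrap module, which is not shown here.
def _example_mount(app):
    # `app` is a falcon.App (Falcon 3.x) or falcon.API (older releases) instance.
    app.add_route('/api/v1/servers/{hostname}/info', ServerInfoApiResource())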
|
the-stack_0_10313 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import itertools
import operator
import zlib
import jmespath
import re
from c7n.actions import BaseAction, ModifyVpcSecurityGroupsAction
from c7n.exceptions import PolicyValidationError, ClientError
from c7n.filters import (
DefaultVpcBase, Filter, ValueFilter)
import c7n.filters.vpc as net_filters
from c7n.filters.iamaccess import CrossAccountAccessFilter
from c7n.filters.related import RelatedResourceFilter, RelatedResourceByIdFilter
from c7n.filters.revisions import Diff
from c7n import query, resolver
from c7n.manager import resources
from c7n.resources.securityhub import OtherResourcePostFinding, PostFinding
from c7n.utils import (
chunks, local_session, type_schema, get_retry, parse_cidr)
from c7n.resources.aws import shape_validate
from c7n.resources.shield import IsShieldProtected, SetShieldProtection
@resources.register('vpc')
class Vpc(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'vpc'
enum_spec = ('describe_vpcs', 'Vpcs', None)
name = id = 'VpcId'
filter_name = 'VpcIds'
filter_type = 'list'
cfn_type = config_type = 'AWS::EC2::VPC'
id_prefix = "vpc-"
@Vpc.filter_registry.register('flow-logs')
class FlowLogFilter(Filter):
"""Are flow logs enabled on the resource.
ie to find all vpcs with flows logs disabled we can do this
:example:
.. code-block:: yaml
policies:
- name: flow-logs-enabled
resource: vpc
filters:
- flow-logs
or to find all vpcs with flow logs but that don't match a
particular configuration.
:example:
.. code-block:: yaml
policies:
- name: flow-mis-configured
resource: vpc
filters:
- not:
- type: flow-logs
enabled: true
set-op: or
op: equal
# equality operator applies to following keys
traffic-type: all
status: active
log-group: vpc-logs
"""
schema = type_schema(
'flow-logs',
**{'enabled': {'type': 'boolean', 'default': False},
'op': {'enum': ['equal', 'not-equal'], 'default': 'equal'},
'set-op': {'enum': ['or', 'and'], 'default': 'or'},
'status': {'enum': ['active']},
'deliver-status': {'enum': ['success', 'failure']},
'destination': {'type': 'string'},
'destination-type': {'enum': ['s3', 'cloud-watch-logs']},
'traffic-type': {'enum': ['accept', 'reject', 'all']},
'log-format': {'type': 'string'},
'log-group': {'type': 'string'}})
permissions = ('ec2:DescribeFlowLogs',)
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('ec2')
# TODO given subnet/nic level logs, we should paginate, but we'll
# need to add/update botocore pagination support.
logs = client.describe_flow_logs().get('FlowLogs', ())
m = self.manager.get_model()
resource_map = {}
for fl in logs:
resource_map.setdefault(fl['ResourceId'], []).append(fl)
enabled = self.data.get('enabled', False)
log_group = self.data.get('log-group')
log_format = self.data.get('log-format')
traffic_type = self.data.get('traffic-type')
destination_type = self.data.get('destination-type')
destination = self.data.get('destination')
status = self.data.get('status')
delivery_status = self.data.get('deliver-status')
op = self.data.get('op', 'equal') == 'equal' and operator.eq or operator.ne
set_op = self.data.get('set-op', 'or')
results = []
# looping over vpc resources
for r in resources:
if r[m.id] not in resource_map:
# we didn't find a flow log for this vpc
if enabled:
# vpc flow logs not enabled so exclude this vpc from results
continue
results.append(r)
continue
flogs = resource_map[r[m.id]]
r['c7n:flow-logs'] = flogs
# config comparisons are pointless if we only want vpcs with no flow logs
if enabled:
fl_matches = []
for fl in flogs:
dest_type_match = (destination_type is None) or op(
fl['LogDestinationType'], destination_type)
dest_match = (destination is None) or op(
fl['LogDestination'], destination)
status_match = (status is None) or op(fl['FlowLogStatus'], status.upper())
delivery_status_match = (delivery_status is None) or op(
fl['DeliverLogsStatus'], delivery_status.upper())
traffic_type_match = (
traffic_type is None) or op(
fl['TrafficType'],
traffic_type.upper())
log_group_match = (log_group is None) or op(fl.get('LogGroupName'), log_group)
log_format_match = (log_format is None) or op(fl.get('LogFormat'), log_format)
# combine all conditions to check if flow log matches the spec
fl_match = (status_match and traffic_type_match and dest_match and
log_format_match and log_group_match and
dest_type_match and delivery_status_match)
fl_matches.append(fl_match)
if set_op == 'or':
if any(fl_matches):
results.append(r)
elif set_op == 'and':
if all(fl_matches):
results.append(r)
return results
@Vpc.filter_registry.register('security-group')
class VpcSecurityGroupFilter(RelatedResourceFilter):
"""Filter VPCs based on Security Group attributes
:example:
.. code-block:: yaml
policies:
- name: vpc-by-sg
resource: vpc
filters:
- type: security-group
key: tag:Color
value: Gray
"""
schema = type_schema(
'security-group', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
RelatedResource = "c7n.resources.vpc.SecurityGroup"
RelatedIdsExpression = '[SecurityGroups][].GroupId'
AnnotationKey = "matched-vpcs"
def get_related_ids(self, resources):
vpc_ids = [vpc['VpcId'] for vpc in resources]
vpc_group_ids = {
g['GroupId'] for g in
self.manager.get_resource_manager('security-group').resources()
if g.get('VpcId', '') in vpc_ids
}
return vpc_group_ids
@Vpc.filter_registry.register('subnet')
class VpcSubnetFilter(RelatedResourceFilter):
"""Filter VPCs based on Subnet attributes
:example:
.. code-block:: yaml
policies:
- name: vpc-by-subnet
resource: vpc
filters:
- type: subnet
key: tag:Color
value: Gray
"""
schema = type_schema(
'subnet', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
RelatedResource = "c7n.resources.vpc.Subnet"
RelatedIdsExpression = '[Subnets][].SubnetId'
AnnotationKey = "MatchedVpcsSubnets"
def get_related_ids(self, resources):
vpc_ids = [vpc['VpcId'] for vpc in resources]
vpc_subnet_ids = {
g['SubnetId'] for g in
self.manager.get_resource_manager('subnet').resources()
if g.get('VpcId', '') in vpc_ids
}
return vpc_subnet_ids
@Vpc.filter_registry.register('nat-gateway')
class VpcNatGatewayFilter(RelatedResourceFilter):
"""Filter VPCs based on NAT Gateway attributes
:example:
.. code-block:: yaml
policies:
- name: vpc-by-nat
resource: vpc
filters:
- type: nat-gateway
key: tag:Color
value: Gray
"""
schema = type_schema(
'nat-gateway', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
RelatedResource = "c7n.resources.vpc.NATGateway"
RelatedIdsExpression = '[NatGateways][].NatGatewayId'
AnnotationKey = "MatchedVpcsNatGateways"
def get_related_ids(self, resources):
vpc_ids = [vpc['VpcId'] for vpc in resources]
vpc_natgw_ids = {
g['NatGatewayId'] for g in
self.manager.get_resource_manager('nat-gateway').resources()
if g.get('VpcId', '') in vpc_ids
}
return vpc_natgw_ids
@Vpc.filter_registry.register('internet-gateway')
class VpcInternetGatewayFilter(RelatedResourceFilter):
"""Filter VPCs based on Internet Gateway attributes
:example:
.. code-block:: yaml
policies:
- name: vpc-by-igw
resource: vpc
filters:
- type: internet-gateway
key: tag:Color
value: Gray
"""
schema = type_schema(
'internet-gateway', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
RelatedResource = "c7n.resources.vpc.InternetGateway"
RelatedIdsExpression = '[InternetGateways][].InternetGatewayId'
AnnotationKey = "MatchedVpcsIgws"
def get_related_ids(self, resources):
vpc_ids = [vpc['VpcId'] for vpc in resources]
vpc_igw_ids = set()
for igw in self.manager.get_resource_manager('internet-gateway').resources():
for attachment in igw['Attachments']:
if attachment.get('VpcId', '') in vpc_ids:
vpc_igw_ids.add(igw['InternetGatewayId'])
return vpc_igw_ids
@Vpc.filter_registry.register('vpc-attributes')
class AttributesFilter(Filter):
"""Filters VPCs based on their DNS attributes
:example:
.. code-block:: yaml
policies:
- name: dns-hostname-enabled
resource: vpc
filters:
- type: vpc-attributes
dnshostnames: True
"""
schema = type_schema(
'vpc-attributes',
dnshostnames={'type': 'boolean'},
dnssupport={'type': 'boolean'})
permissions = ('ec2:DescribeVpcAttribute',)
def process(self, resources, event=None):
results = []
client = local_session(self.manager.session_factory).client('ec2')
dns_hostname = self.data.get('dnshostnames', None)
dns_support = self.data.get('dnssupport', None)
for r in resources:
if dns_hostname is not None:
hostname = client.describe_vpc_attribute(
VpcId=r['VpcId'],
Attribute='enableDnsHostnames'
)['EnableDnsHostnames']['Value']
if dns_support is not None:
support = client.describe_vpc_attribute(
VpcId=r['VpcId'],
Attribute='enableDnsSupport'
)['EnableDnsSupport']['Value']
if dns_hostname is not None and dns_support is not None:
if dns_hostname == hostname and dns_support == support:
results.append(r)
elif dns_hostname is not None and dns_support is None:
if dns_hostname == hostname:
results.append(r)
elif dns_support is not None and dns_hostname is None:
if dns_support == support:
results.append(r)
return results
@Vpc.filter_registry.register('dhcp-options')
class DhcpOptionsFilter(Filter):
"""Filter VPCs based on their dhcp options
:example:
.. code-block:: yaml
policies:
- name: vpcs-in-domain
resource: vpc
filters:
- type: dhcp-options
domain-name: ec2.internal
    If an option value is specified as a list, then all elements must be present.
    If an option value is specified as a string, then that string must be present.
    VPCs not matching a given option value can be found via specifying
    a `present: false` parameter.
"""
option_keys = ('domain-name', 'domain-name-servers', 'ntp-servers')
schema = type_schema('dhcp-options', **{
k: {'oneOf': [
{'type': 'array', 'items': {'type': 'string'}},
{'type': 'string'}]}
for k in option_keys})
schema['properties']['present'] = {'type': 'boolean'}
permissions = ('ec2:DescribeDhcpOptions',)
def validate(self):
if not any([self.data.get(k) for k in self.option_keys]):
raise PolicyValidationError("one of %s required" % (self.option_keys,))
return self
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('ec2')
option_ids = [r['DhcpOptionsId'] for r in resources]
options_map = {}
results = []
for options in client.describe_dhcp_options(
Filters=[{
'Name': 'dhcp-options-id',
'Values': option_ids}]).get('DhcpOptions', ()):
options_map[options['DhcpOptionsId']] = {
o['Key']: [v['Value'] for v in o['Values']]
for o in options['DhcpConfigurations']}
for vpc in resources:
if self.process_vpc(vpc, options_map[vpc['DhcpOptionsId']]):
results.append(vpc)
return results
def process_vpc(self, vpc, dhcp):
vpc['c7n:DhcpConfiguration'] = dhcp
found = True
for k in self.option_keys:
if k not in self.data:
continue
is_list = isinstance(self.data[k], list)
if k not in dhcp:
found = False
elif not is_list and self.data[k] not in dhcp[k]:
found = False
elif is_list and sorted(self.data[k]) != sorted(dhcp[k]):
found = False
if not self.data.get('present', True):
found = not found
return found
@Vpc.action_registry.register('post-finding')
class VpcPostFinding(PostFinding):
resource_type = "AwsEc2Vpc"
def format_resource(self, r):
envelope, payload = self.format_envelope(r)
# more inane sechub formatting deltas
detail = {
'DhcpOptionsId': r.get('DhcpOptionsId'),
'State': r['State']}
for assoc in r.get('CidrBlockAssociationSet', ()):
detail.setdefault('CidrBlockAssociationSet', []).append(dict(
AssociationId=assoc['AssociationId'],
CidrBlock=assoc['CidrBlock'],
CidrBlockState=assoc['CidrBlockState']['State']))
for assoc in r.get('Ipv6CidrBlockAssociationSet', ()):
detail.setdefault('Ipv6CidrBlockAssociationSet', []).append(dict(
AssociationId=assoc['AssociationId'],
Ipv6CidrBlock=assoc['Ipv6CidrBlock'],
CidrBlockState=assoc['Ipv6CidrBlockState']['State']))
payload.update(self.filter_empty(detail))
return envelope
class DescribeSubnets(query.DescribeSource):
def get_resources(self, resource_ids):
while resource_ids:
try:
return super().get_resources(resource_ids)
except ClientError as e:
if e.response['Error']['Code'] != 'InvalidSubnetID.NotFound':
raise
sid = extract_subnet_id(e)
if sid:
resource_ids.remove(sid)
else:
return []
RE_ERROR_SUBNET_ID = re.compile("'(?P<subnet_id>subnet-.*?)'")
def extract_subnet_id(state_error):
"Extract an subnet id from an error"
subnet_id = None
match = RE_ERROR_SUBNET_ID.search(str(state_error))
if match:
subnet_id = match.groupdict().get('subnet_id')
return subnet_id
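# A quick illustration of the helper above; the error text is a made-up example of the
# shape of an EC2 InvalidSubnetID.NotFound message:
#
#   >>> extract_subnet_id("The subnet ID 'subnet-0abc1234' does not exist")
#   'subnet-0abc1234'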
@resources.register('subnet')
class Subnet(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'subnet'
enum_spec = ('describe_subnets', 'Subnets', None)
name = id = 'SubnetId'
filter_name = 'SubnetIds'
filter_type = 'list'
cfn_type = config_type = 'AWS::EC2::Subnet'
id_prefix = "subnet-"
source_mapping = {
'describe': DescribeSubnets,
'config': query.ConfigSource}
Subnet.filter_registry.register('flow-logs', FlowLogFilter)
@Subnet.filter_registry.register('vpc')
class SubnetVpcFilter(net_filters.VpcFilter):
RelatedIdsExpression = "VpcId"
class ConfigSG(query.ConfigSource):
def load_resource(self, item):
r = super(ConfigSG, self).load_resource(item)
for rset in ('IpPermissions', 'IpPermissionsEgress'):
for p in r.get(rset, ()):
if p.get('FromPort', '') is None:
p.pop('FromPort')
if p.get('ToPort', '') is None:
p.pop('ToPort')
if 'Ipv6Ranges' not in p:
p[u'Ipv6Ranges'] = []
for i in p.get('UserIdGroupPairs', ()):
for k, v in list(i.items()):
if v is None:
i.pop(k)
# legacy config form, still version 1.2
for attribute, element_key in (('IpRanges', u'CidrIp'),):
if attribute not in p:
continue
p[attribute] = [{element_key: v} for v in p[attribute]]
if 'Ipv4Ranges' in p:
p['IpRanges'] = p.pop('Ipv4Ranges')
return r
@resources.register('security-group')
class SecurityGroup(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'security-group'
enum_spec = ('describe_security_groups', 'SecurityGroups', None)
id = 'GroupId'
name = 'GroupName'
filter_name = "GroupIds"
filter_type = 'list'
cfn_type = config_type = "AWS::EC2::SecurityGroup"
id_prefix = "sg-"
source_mapping = {
'config': ConfigSG,
'describe': query.DescribeSource
}
@SecurityGroup.filter_registry.register('diff')
class SecurityGroupDiffFilter(Diff):
def diff(self, source, target):
differ = SecurityGroupDiff()
return differ.diff(source, target)
class SecurityGroupDiff:
"""Diff two versions of a security group
Immutable: GroupId, GroupName, Description, VpcId, OwnerId
Mutable: Tags, Rules
"""
def diff(self, source, target):
delta = {}
tag_delta = self.get_tag_delta(source, target)
if tag_delta:
delta['tags'] = tag_delta
ingress_delta = self.get_rule_delta('IpPermissions', source, target)
if ingress_delta:
delta['ingress'] = ingress_delta
egress_delta = self.get_rule_delta(
'IpPermissionsEgress', source, target)
if egress_delta:
delta['egress'] = egress_delta
if delta:
return delta
def get_tag_delta(self, source, target):
source_tags = {t['Key']: t['Value'] for t in source.get('Tags', ())}
target_tags = {t['Key']: t['Value'] for t in target.get('Tags', ())}
target_keys = set(target_tags.keys())
source_keys = set(source_tags.keys())
removed = source_keys.difference(target_keys)
added = target_keys.difference(source_keys)
changed = set()
for k in target_keys.intersection(source_keys):
if source_tags[k] != target_tags[k]:
changed.add(k)
return {k: v for k, v in {
'added': {k: target_tags[k] for k in added},
'removed': {k: source_tags[k] for k in removed},
'updated': {k: target_tags[k] for k in changed}}.items() if v}
def get_rule_delta(self, key, source, target):
source_rules = {
self.compute_rule_hash(r): r for r in source.get(key, ())}
target_rules = {
self.compute_rule_hash(r): r for r in target.get(key, ())}
source_keys = set(source_rules.keys())
target_keys = set(target_rules.keys())
removed = source_keys.difference(target_keys)
added = target_keys.difference(source_keys)
return {k: v for k, v in
{'removed': [source_rules[rid] for rid in sorted(removed)],
'added': [target_rules[rid] for rid in sorted(added)]}.items() if v}
RULE_ATTRS = (
('PrefixListIds', 'PrefixListId'),
('UserIdGroupPairs', 'GroupId'),
('IpRanges', 'CidrIp'),
('Ipv6Ranges', 'CidrIpv6')
)
def compute_rule_hash(self, rule):
buf = "%d-%d-%s-" % (
rule.get('FromPort', 0) or 0,
rule.get('ToPort', 0) or 0,
rule.get('IpProtocol', '-1') or '-1'
)
for a, ke in self.RULE_ATTRS:
if a not in rule:
continue
ev = [e[ke] for e in rule[a]]
ev.sort()
for e in ev:
buf += "%s-" % e
# mask to generate the same numeric value across all Python versions
return zlib.crc32(buf.encode('ascii')) & 0xffffffff
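# Illustrative sketch (not part of the Cloud Custodian API): SecurityGroupDiff can be
# exercised standalone on two revisions of a security group dict. The group id, tag and
# rule below are made up.
def _example_security_group_diff():
    old = {
        'GroupId': 'sg-11111111',
        'Tags': [{'Key': 'Env', 'Value': 'dev'}],
        'IpPermissions': [{
            'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22,
            'IpRanges': [{'CidrIp': '10.0.0.0/8'}],
            'Ipv6Ranges': [], 'PrefixListIds': [], 'UserIdGroupPairs': []}],
    }
    new = dict(old, Tags=[{'Key': 'Env', 'Value': 'prod'}])
    # Only the tag changed, so the delta is {'tags': {'updated': {'Env': 'prod'}}}
    return SecurityGroupDiff().diff(old, new)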
@SecurityGroup.action_registry.register('patch')
class SecurityGroupApplyPatch(BaseAction):
"""Modify a resource via application of a reverse delta.
"""
schema = type_schema('patch')
permissions = ('ec2:AuthorizeSecurityGroupIngress',
'ec2:AuthorizeSecurityGroupEgress',
'ec2:RevokeSecurityGroupIngress',
'ec2:RevokeSecurityGroupEgress',
'ec2:CreateTags',
'ec2:DeleteTags')
def validate(self):
diff_filters = [n for n in self.manager.iter_filters() if isinstance(
n, SecurityGroupDiffFilter)]
if not len(diff_filters):
raise PolicyValidationError(
"resource patching requires diff filter")
return self
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
differ = SecurityGroupDiff()
patcher = SecurityGroupPatch()
for r in resources:
# reverse the patch by computing fresh, the forward
# patch is for notifications
d = differ.diff(r, r['c7n:previous-revision']['resource'])
patcher.apply_delta(client, r, d)
class SecurityGroupPatch:
RULE_TYPE_MAP = {
'egress': ('IpPermissionsEgress',
'revoke_security_group_egress',
'authorize_security_group_egress'),
'ingress': ('IpPermissions',
'revoke_security_group_ingress',
'authorize_security_group_ingress')}
retry = staticmethod(get_retry((
'RequestLimitExceeded', 'Client.RequestLimitExceeded')))
def apply_delta(self, client, target, change_set):
if 'tags' in change_set:
self.process_tags(client, target, change_set['tags'])
if 'ingress' in change_set:
self.process_rules(
client, 'ingress', target, change_set['ingress'])
if 'egress' in change_set:
self.process_rules(
client, 'egress', target, change_set['egress'])
def process_tags(self, client, group, tag_delta):
if 'removed' in tag_delta:
self.retry(client.delete_tags,
Resources=[group['GroupId']],
Tags=[{'Key': k}
for k in tag_delta['removed']])
tags = []
if 'added' in tag_delta:
tags.extend(
[{'Key': k, 'Value': v}
for k, v in tag_delta['added'].items()])
if 'updated' in tag_delta:
tags.extend(
[{'Key': k, 'Value': v}
for k, v in tag_delta['updated'].items()])
if tags:
self.retry(
client.create_tags, Resources=[group['GroupId']], Tags=tags)
def process_rules(self, client, rule_type, group, delta):
key, revoke_op, auth_op = self.RULE_TYPE_MAP[rule_type]
revoke, authorize = getattr(
client, revoke_op), getattr(client, auth_op)
# Process removes
if 'removed' in delta:
self.retry(revoke, GroupId=group['GroupId'],
IpPermissions=[r for r in delta['removed']])
# Process adds
if 'added' in delta:
self.retry(authorize, GroupId=group['GroupId'],
IpPermissions=[r for r in delta['added']])
class SGUsage(Filter):
def get_permissions(self):
return list(itertools.chain(
*[self.manager.get_resource_manager(m).get_permissions()
for m in
['lambda', 'eni', 'launch-config', 'security-group', 'event-rule-target']]))
def filter_peered_refs(self, resources):
if not resources:
return resources
# Check that groups are not referenced across accounts
client = local_session(self.manager.session_factory).client('ec2')
peered_ids = set()
for resource_set in chunks(resources, 200):
for sg_ref in client.describe_security_group_references(
GroupId=[r['GroupId'] for r in resource_set]
)['SecurityGroupReferenceSet']:
peered_ids.add(sg_ref['GroupId'])
self.log.debug(
"%d of %d groups w/ peered refs", len(peered_ids), len(resources))
return [r for r in resources if r['GroupId'] not in peered_ids]
def get_scanners(self):
return (
("nics", self.get_eni_sgs),
("sg-perm-refs", self.get_sg_refs),
('lambdas', self.get_lambda_sgs),
("launch-configs", self.get_launch_config_sgs),
("ecs-cwe", self.get_ecs_cwe_sgs),
("codebuild", self.get_codebuild_sgs),
)
def scan_groups(self):
used = set()
for kind, scanner in self.get_scanners():
sg_ids = scanner()
new_refs = sg_ids.difference(used)
used = used.union(sg_ids)
self.log.debug(
"%s using %d sgs, new refs %s total %s",
kind, len(sg_ids), len(new_refs), len(used))
return used
def get_launch_config_sgs(self):
# Note assuming we also have launch config garbage collection
# enabled.
sg_ids = set()
for cfg in self.manager.get_resource_manager('launch-config').resources():
for g in cfg['SecurityGroups']:
sg_ids.add(g)
for g in cfg['ClassicLinkVPCSecurityGroups']:
sg_ids.add(g)
return sg_ids
def get_lambda_sgs(self):
sg_ids = set()
for func in self.manager.get_resource_manager('lambda').resources(augment=False):
if 'VpcConfig' not in func:
continue
for g in func['VpcConfig']['SecurityGroupIds']:
sg_ids.add(g)
return sg_ids
def get_eni_sgs(self):
sg_ids = set()
for nic in self.manager.get_resource_manager('eni').resources():
for g in nic['Groups']:
sg_ids.add(g['GroupId'])
return sg_ids
def get_codebuild_sgs(self):
sg_ids = set()
for cb in self.manager.get_resource_manager('codebuild').resources():
sg_ids |= set(cb.get('vpcConfig', {}).get('securityGroupIds', []))
return sg_ids
def get_sg_refs(self):
sg_ids = set()
for sg in self.manager.get_resource_manager('security-group').resources():
for perm_type in ('IpPermissions', 'IpPermissionsEgress'):
for p in sg.get(perm_type, []):
for g in p.get('UserIdGroupPairs', ()):
sg_ids.add(g['GroupId'])
return sg_ids
def get_ecs_cwe_sgs(self):
sg_ids = set()
expr = jmespath.compile(
'EcsParameters.NetworkConfiguration.awsvpcConfiguration.SecurityGroups[]')
for rule in self.manager.get_resource_manager(
'event-rule-target').resources(augment=False):
ids = expr.search(rule)
if ids:
sg_ids.update(ids)
return sg_ids
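# For reference, the jmespath expression used in get_ecs_cwe_sgs pulls security group
# ids out of an ECS cloud watch event target; for a made-up target it behaves like:
#
#   >>> jmespath.search(
#   ...     'EcsParameters.NetworkConfiguration.awsvpcConfiguration.SecurityGroups[]',
#   ...     {'EcsParameters': {'NetworkConfiguration': {
#   ...         'awsvpcConfiguration': {'SecurityGroups': ['sg-0123abcd']}}}})
#   ['sg-0123abcd']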
@SecurityGroup.filter_registry.register('unused')
class UnusedSecurityGroup(SGUsage):
"""Filter to just vpc security groups that are not used.
We scan all extant enis in the vpc to get a baseline set of groups
in use. Then augment with those referenced by launch configs, and
lambdas as they may not have extant resources in the vpc at a
given moment. We also find any security group with references from
other security group either within the vpc or across peered
connections. Also checks cloud watch event targeting ecs.
Checks - enis, lambda, launch-configs, sg rule refs, and ecs cwe
targets.
Note this filter does not support classic security groups atm.
:example:
.. code-block:: yaml
policies:
- name: security-groups-unused
resource: security-group
filters:
- unused
"""
schema = type_schema('unused')
def process(self, resources, event=None):
used = self.scan_groups()
unused = [
r for r in resources
if r['GroupId'] not in used and 'VpcId' in r]
return unused and self.filter_peered_refs(unused) or []
@SecurityGroup.filter_registry.register('used')
class UsedSecurityGroup(SGUsage):
"""Filter to security groups that are used.
This operates as a complement to the unused filter for multi-step
workflows.
:example:
.. code-block:: yaml
policies:
- name: security-groups-in-use
resource: security-group
filters:
- used
"""
schema = type_schema('used')
def process(self, resources, event=None):
used = self.scan_groups()
unused = [
r for r in resources
if r['GroupId'] not in used and 'VpcId' in r]
unused = {g['GroupId'] for g in self.filter_peered_refs(unused)}
return [r for r in resources if r['GroupId'] not in unused]
@SecurityGroup.filter_registry.register('stale')
class Stale(Filter):
"""Filter to find security groups that contain stale references
to other groups that are either no longer present or traverse
a broken vpc peering connection. Note this applies to VPC
Security groups only and will implicitly filter security groups.
AWS Docs:
https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-security-groups.html
:example:
.. code-block:: yaml
policies:
- name: stale-security-groups
resource: security-group
filters:
- stale
"""
schema = type_schema('stale')
permissions = ('ec2:DescribeStaleSecurityGroups',)
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('ec2')
vpc_ids = {r['VpcId'] for r in resources if 'VpcId' in r}
group_map = {r['GroupId']: r for r in resources}
results = []
self.log.debug("Querying %d vpc for stale refs", len(vpc_ids))
stale_count = 0
for vpc_id in vpc_ids:
stale_groups = client.describe_stale_security_groups(
VpcId=vpc_id).get('StaleSecurityGroupSet', ())
stale_count += len(stale_groups)
for s in stale_groups:
if s['GroupId'] in group_map:
r = group_map[s['GroupId']]
if 'StaleIpPermissions' in s:
r['MatchedIpPermissions'] = s['StaleIpPermissions']
if 'StaleIpPermissionsEgress' in s:
r['MatchedIpPermissionsEgress'] = s[
'StaleIpPermissionsEgress']
results.append(r)
self.log.debug("Found %d stale security groups", stale_count)
return results
@SecurityGroup.filter_registry.register('default-vpc')
class SGDefaultVpc(DefaultVpcBase):
"""Filter that returns any security group that exists within the default vpc
:example:
.. code-block:: yaml
policies:
- name: security-group-default-vpc
resource: security-group
filters:
- default-vpc
"""
schema = type_schema('default-vpc')
def __call__(self, resource, event=None):
if 'VpcId' not in resource:
return False
return self.match(resource['VpcId'])
class SGPermission(Filter):
"""Filter for verifying security group ingress and egress permissions
All attributes of a security group permission are available as
value filters.
If multiple attributes are specified the permission must satisfy
all of them. Note that within an attribute match against a list value
of a permission we default to or.
If a group has any permissions that match all conditions, then it
matches the filter.
Permissions that match on the group are annotated onto the group and
can subsequently be used by the remove-permission action.
We have specialized handling for matching `Ports` in ingress/egress
permission From/To range. The following example matches on ingress
rules which allow for a range that includes all of the given ports.
.. code-block:: yaml
- type: ingress
Ports: [22, 443, 80]
As well for verifying that a rule only allows for a specific set of ports
as in the following example. The delta between this and the previous
example is that if the permission allows for any ports not specified here,
then the rule will match. ie. OnlyPorts is a negative assertion match,
it matches when a permission includes ports outside of the specified set.
.. code-block:: yaml
- type: ingress
OnlyPorts: [22]
For simplifying ipranges handling which is specified as a list on a rule
we provide a `Cidr` key which can be used as a value type filter evaluated
against each of the rules. If any iprange cidr match then the permission
matches.
.. code-block:: yaml
- type: ingress
IpProtocol: -1
FromPort: 445
We also have specialized handling for matching self-references in
ingress/egress permissions. The following example matches on ingress
rules which allow traffic its own same security group.
.. code-block:: yaml
- type: ingress
SelfReference: True
As well for assertions that a ingress/egress permission only matches
a given set of ports, *note* OnlyPorts is an inverse match.
.. code-block:: yaml
- type: egress
OnlyPorts: [22, 443, 80]
- type: egress
Cidr:
value_type: cidr
op: in
value: x.y.z
`Cidr` can match ipv4 rules and `CidrV6` can match ipv6 rules. In
this example we are blocking global inbound connections to SSH or
RDP.
.. code-block:: yaml
- or:
- type: ingress
Ports: [22, 3389]
Cidr:
value: "0.0.0.0/0"
- type: ingress
Ports: [22, 3389]
CidrV6:
value: "::/0"
`SGReferences` can be used to filter out SG references in rules.
In this example we want to block ingress rules that reference a SG
that is tagged with `Access: Public`.
.. code-block:: yaml
- type: ingress
SGReferences:
key: "tag:Access"
value: "Public"
op: equal
We can also filter SG references based on the VPC that they are
within. In this example we want to ensure that our outbound rules
that reference SGs are only referencing security groups within a
specified VPC.
.. code-block:: yaml
- type: egress
SGReferences:
key: 'VpcId'
value: 'vpc-11a1a1aa'
op: equal
Likewise, we can also filter SG references by their description.
For example, we can prevent egress rules from referencing any
SGs that have a description of "default - DO NOT USE".
.. code-block:: yaml
- type: egress
SGReferences:
key: 'Description'
value: 'default - DO NOT USE'
op: equal
"""
perm_attrs = {
'IpProtocol', 'FromPort', 'ToPort', 'UserIdGroupPairs',
'IpRanges', 'PrefixListIds'}
filter_attrs = {
'Cidr', 'CidrV6', 'Ports', 'OnlyPorts',
'SelfReference', 'Description', 'SGReferences'}
attrs = perm_attrs.union(filter_attrs)
attrs.add('match-operator')
def validate(self):
delta = set(self.data.keys()).difference(self.attrs)
delta.remove('type')
if delta:
raise PolicyValidationError("Unknown keys %s on %s" % (
", ".join(delta), self.manager.data))
return self
def process(self, resources, event=None):
self.vfilters = []
fattrs = list(sorted(self.perm_attrs.intersection(self.data.keys())))
self.ports = 'Ports' in self.data and self.data['Ports'] or ()
self.only_ports = (
'OnlyPorts' in self.data and self.data['OnlyPorts'] or ())
for f in fattrs:
fv = self.data.get(f)
if isinstance(fv, dict):
fv['key'] = f
else:
fv = {f: fv}
vf = ValueFilter(fv, self.manager)
vf.annotate = False
self.vfilters.append(vf)
return super(SGPermission, self).process(resources, event)
def process_ports(self, perm):
found = None
if 'FromPort' in perm and 'ToPort' in perm:
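            # 'Ports' matches when any requested port falls within the rule's From/To range.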
for port in self.ports:
if port >= perm['FromPort'] and port <= perm['ToPort']:
found = True
break
found = False
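            # 'OnlyPorts' is a negative assertion (see the class docstring): the permission
            # matches when it includes ports outside of the specified set.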
only_found = False
for port in self.only_ports:
if port == perm['FromPort'] and port == perm['ToPort']:
only_found = True
if self.only_ports and not only_found:
found = found is None or found and True or False
if self.only_ports and only_found:
found = False
return found
def _process_cidr(self, cidr_key, cidr_type, range_type, perm):
found = None
ip_perms = perm.get(range_type, [])
if not ip_perms:
return False
match_range = self.data[cidr_key]
if isinstance(match_range, dict):
match_range['key'] = cidr_type
else:
match_range = {cidr_type: match_range}
vf = ValueFilter(match_range, self.manager)
vf.annotate = False
for ip_range in ip_perms:
found = vf(ip_range)
if found:
break
else:
found = False
return found
def process_cidrs(self, perm):
found_v6 = found_v4 = None
if 'CidrV6' in self.data:
found_v6 = self._process_cidr('CidrV6', 'CidrIpv6', 'Ipv6Ranges', perm)
if 'Cidr' in self.data:
found_v4 = self._process_cidr('Cidr', 'CidrIp', 'IpRanges', perm)
match_op = self.data.get('match-operator', 'and') == 'and' and all or any
cidr_match = [k for k in (found_v6, found_v4) if k is not None]
if not cidr_match:
return None
return match_op(cidr_match)
def process_description(self, perm):
if 'Description' not in self.data:
return None
d = dict(self.data['Description'])
d['key'] = 'Description'
vf = ValueFilter(d, self.manager)
vf.annotate = False
for k in ('Ipv6Ranges', 'IpRanges', 'UserIdGroupPairs', 'PrefixListIds'):
if k not in perm or not perm[k]:
continue
return vf(perm[k][0])
return False
def process_self_reference(self, perm, sg_id):
found = None
ref_match = self.data.get('SelfReference')
if ref_match is not None:
found = False
if 'UserIdGroupPairs' in perm and 'SelfReference' in self.data:
self_reference = sg_id in [p['GroupId']
for p in perm['UserIdGroupPairs']]
if ref_match is False and not self_reference:
found = True
if ref_match is True and self_reference:
found = True
return found
def process_sg_references(self, perm, owner_id):
sg_refs = self.data.get('SGReferences')
if not sg_refs:
return None
sg_perm = perm.get('UserIdGroupPairs', [])
if not sg_perm:
return False
sg_group_ids = [p['GroupId'] for p in sg_perm if p.get('UserId', '') == owner_id]
sg_resources = self.manager.get_resources(sg_group_ids)
vf = ValueFilter(sg_refs, self.manager)
vf.annotate = False
for sg in sg_resources:
if vf(sg):
return True
return False
def expand_permissions(self, permissions):
"""Expand each list of cidr, prefix list, user id group pair
by port/protocol as an individual rule.
The console ux automatically expands them out as addition/removal is
per this expansion, the describe calls automatically group them.
"""
for p in permissions:
np = dict(p)
values = {}
for k in (u'IpRanges',
u'Ipv6Ranges',
u'PrefixListIds',
u'UserIdGroupPairs'):
values[k] = np.pop(k, ())
np[k] = []
for k, v in values.items():
if not v:
continue
for e in v:
ep = dict(np)
ep[k] = [e]
yield ep
def __call__(self, resource):
matched = []
sg_id = resource['GroupId']
owner_id = resource['OwnerId']
match_op = self.data.get('match-operator', 'and') == 'and' and all or any
for perm in self.expand_permissions(resource[self.ip_permissions_key]):
perm_matches = {}
for idx, f in enumerate(self.vfilters):
perm_matches[idx] = bool(f(perm))
perm_matches['description'] = self.process_description(perm)
perm_matches['ports'] = self.process_ports(perm)
perm_matches['cidrs'] = self.process_cidrs(perm)
perm_matches['self-refs'] = self.process_self_reference(perm, sg_id)
perm_matches['sg-refs'] = self.process_sg_references(perm, owner_id)
perm_match_values = list(filter(
lambda x: x is not None, perm_matches.values()))
# account for one python behavior any([]) == False, all([]) == True
if match_op == all and not perm_match_values:
continue
match = match_op(perm_match_values)
if match:
matched.append(perm)
if matched:
resource['Matched%s' % self.ip_permissions_key] = matched
return True
SGPermissionSchema = {
'match-operator': {'type': 'string', 'enum': ['or', 'and']},
'Ports': {'type': 'array', 'items': {'type': 'integer'}},
'SelfReference': {'type': 'boolean'},
'OnlyPorts': {'type': 'array', 'items': {'type': 'integer'}},
'IpProtocol': {
'oneOf': [
{'enum': ["-1", -1, 'tcp', 'udp', 'icmp', 'icmpv6']},
{'$ref': '#/definitions/filters/value'}
]
},
'FromPort': {'oneOf': [
{'$ref': '#/definitions/filters/value'},
{'type': 'integer'}]},
'ToPort': {'oneOf': [
{'$ref': '#/definitions/filters/value'},
{'type': 'integer'}]},
'UserIdGroupPairs': {},
'IpRanges': {},
'PrefixListIds': {},
'Description': {},
'Cidr': {},
'CidrV6': {},
'SGReferences': {}
}
@SecurityGroup.filter_registry.register('ingress')
class IPPermission(SGPermission):
ip_permissions_key = "IpPermissions"
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {'type': {'enum': ['ingress']}},
'required': ['type']}
schema['properties'].update(SGPermissionSchema)
@SecurityGroup.filter_registry.register('egress')
class IPPermissionEgress(SGPermission):
ip_permissions_key = "IpPermissionsEgress"
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {'type': {'enum': ['egress']}},
'required': ['type']}
schema['properties'].update(SGPermissionSchema)
@SecurityGroup.action_registry.register('delete')
class Delete(BaseAction):
"""Action to delete security group(s)
It is recommended to apply a filter to the delete policy to avoid the
deletion of all security groups returned.
:example:
.. code-block:: yaml
policies:
- name: security-groups-unused-delete
resource: security-group
filters:
- type: unused
actions:
- delete
"""
schema = type_schema('delete')
permissions = ('ec2:DeleteSecurityGroup',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
client.delete_security_group(GroupId=r['GroupId'])
@SecurityGroup.action_registry.register('remove-permissions')
class RemovePermissions(BaseAction):
"""Action to remove ingress/egress rule(s) from a security group
:example:
.. code-block:: yaml
policies:
- name: security-group-revoke-8080
resource: security-group
filters:
- type: ingress
IpProtocol: tcp
Ports: [8080]
actions:
- type: remove-permissions
ingress: matched
"""
schema = type_schema(
'remove-permissions',
ingress={'type': 'string', 'enum': ['matched', 'all']},
egress={'type': 'string', 'enum': ['matched', 'all']})
permissions = ('ec2:RevokeSecurityGroupIngress',
'ec2:RevokeSecurityGroupEgress')
def process(self, resources):
i_perms = self.data.get('ingress', 'matched')
e_perms = self.data.get('egress', 'matched')
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
for label, perms in [('ingress', i_perms), ('egress', e_perms)]:
if perms == 'matched':
key = 'MatchedIpPermissions%s' % (
label == 'egress' and 'Egress' or '')
groups = r.get(key, ())
elif perms == 'all':
key = 'IpPermissions%s' % (
label == 'egress' and 'Egress' or '')
groups = r.get(key, ())
elif isinstance(perms, list):
groups = perms
else:
continue
if not groups:
continue
method = getattr(client, 'revoke_security_group_%s' % label)
method(GroupId=r['GroupId'], IpPermissions=groups)
@SecurityGroup.action_registry.register('set-permissions')
class SetPermissions(BaseAction):
"""Action to add/remove ingress/egress rule(s) to a security group
:example:
.. code-block:: yaml
policies:
- name: ops-access-via
resource: aws.security-group
filters:
- type: ingress
IpProtocol: "-1"
Ports: [22, 3389]
Cidr: "0.0.0.0/0"
actions:
- type: set-permissions
# remove the permission matched by a previous ingress filter.
remove-ingress: matched
# remove permissions by specifying them fully, ie remove default outbound
# access.
remove-egress:
- IpProtocol: "-1"
Cidr: "0.0.0.0/0"
# add a list of permissions to the group.
add-ingress:
# full syntax/parameters to authorize can be used.
- IpPermissions:
- IpProtocol: TCP
FromPort: 22
ToPort: 22
IpRanges:
- Description: Ops SSH Access
CidrIp: "1.1.1.1/32"
- Description: Security SSH Access
CidrIp: "2.2.2.2/32"
# add a list of egress permissions to a security group
add-egress:
- IpProtocol: "TCP"
FromPort: 5044
ToPort: 5044
CidrIp: "192.168.1.2/32"
"""
schema = type_schema(
'set-permissions',
**{'add-ingress': {'type': 'array', 'items': {'type': 'object', 'minProperties': 1}},
'remove-ingress': {'oneOf': [
{'enum': ['all', 'matched']},
{'type': 'array', 'items': {'type': 'object', 'minProperties': 2}}]},
'add-egress': {'type': 'array', 'items': {'type': 'object', 'minProperties': 1}},
'remove-egress': {'oneOf': [
{'enum': ['all', 'matched']},
{'type': 'array', 'items': {'type': 'object', 'minProperties': 2}}]}}
)
permissions = (
'ec2:AuthorizeSecurityGroupEgress',
'ec2:AuthorizeSecurityGroupIngress',)
ingress_shape = "AuthorizeSecurityGroupIngressRequest"
egress_shape = "AuthorizeSecurityGroupEgressRequest"
def validate(self):
request_template = {'GroupId': 'sg-06bc5ce18a2e5d57a'}
for perm_type, shape in (
('egress', self.egress_shape), ('ingress', self.ingress_shape)):
            for perm in self.data.get('add-%s' % perm_type, ()):
params = dict(request_template)
params.update(perm)
shape_validate(params, shape, 'ec2')
def get_permissions(self):
perms = ()
if 'add-ingress' in self.data:
perms += ('ec2:AuthorizeSecurityGroupIngress',)
if 'add-egress' in self.data:
perms += ('ec2:AuthorizeSecurityGroupEgress',)
if 'remove-ingress' in self.data or 'remove-egress' in self.data:
perms += RemovePermissions.permissions
if not perms:
perms = self.permissions + RemovePermissions.permissions
return perms
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
for method, permissions in (
(client.authorize_security_group_egress, self.data.get('add-egress', ())),
(client.authorize_security_group_ingress, self.data.get('add-ingress', ()))):
for p in permissions:
p = dict(p)
p['GroupId'] = r['GroupId']
try:
method(**p)
except ClientError as e:
if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':
raise
remover = RemovePermissions(
{'ingress': self.data.get('remove-ingress', ()),
'egress': self.data.get('remove-egress', ())}, self.manager)
remover.process(resources)
@SecurityGroup.action_registry.register('post-finding')
class SecurityGroupPostFinding(OtherResourcePostFinding):
def format_resource(self, r):
fr = super(SecurityGroupPostFinding, self).format_resource(r)
fr['Type'] = 'AwsEc2SecurityGroup'
return fr
class DescribeENI(query.DescribeSource):
def augment(self, resources):
for r in resources:
r['Tags'] = r.pop('TagSet', [])
return resources
@resources.register('eni')
class NetworkInterface(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'eni'
enum_spec = ('describe_network_interfaces', 'NetworkInterfaces', None)
name = id = 'NetworkInterfaceId'
filter_name = 'NetworkInterfaceIds'
filter_type = 'list'
cfn_type = config_type = "AWS::EC2::NetworkInterface"
id_prefix = "eni-"
source_mapping = {
'describe': DescribeENI,
'config': query.ConfigSource
}
NetworkInterface.filter_registry.register('flow-logs', FlowLogFilter)
NetworkInterface.filter_registry.register(
'network-location', net_filters.NetworkLocation)
@NetworkInterface.filter_registry.register('subnet')
class InterfaceSubnetFilter(net_filters.SubnetFilter):
"""Network interface subnet filter
:example:
.. code-block:: yaml
policies:
- name: network-interface-in-subnet
resource: eni
filters:
- type: subnet
key: CidrBlock
value: 10.0.2.0/24
"""
RelatedIdsExpression = "SubnetId"
@NetworkInterface.filter_registry.register('security-group')
class InterfaceSecurityGroupFilter(net_filters.SecurityGroupFilter):
"""Network interface security group filter
:example:
.. code-block:: yaml
policies:
- name: network-interface-ssh
resource: eni
filters:
- type: security-group
match-resource: true
key: FromPort
value: 22
"""
RelatedIdsExpression = "Groups[].GroupId"
@NetworkInterface.filter_registry.register('vpc')
class InterfaceVpcFilter(net_filters.VpcFilter):
RelatedIdsExpression = "VpcId"
@NetworkInterface.action_registry.register('modify-security-groups')
class InterfaceModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
"""Remove security groups from an interface.
    Can target either physical groups as a list of group ids, or
    symbolic groups like 'matched' or 'all'. 'matched' uses
    the annotations of the 'security-group' interface filter.
    Note an interface always requires at least one security group, so
    an isolation/quarantine group can also be specified; it is applied
    if removal would otherwise leave the interface with no groups.
:example:
.. code-block:: yaml
policies:
- name: network-interface-remove-group
resource: eni
filters:
- type: security-group
match-resource: true
key: FromPort
value: 22
actions:
- type: modify-security-groups
isolation-group: sg-01ab23c4
add: []
"""
permissions = ('ec2:ModifyNetworkInterfaceAttribute',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
groups = super(
InterfaceModifyVpcSecurityGroups, self).get_groups(resources)
for idx, r in enumerate(resources):
client.modify_network_interface_attribute(
NetworkInterfaceId=r['NetworkInterfaceId'],
Groups=groups[idx])
@NetworkInterface.action_registry.register('delete')
class DeleteNetworkInterface(BaseAction):
"""Delete a network interface.
:example:
.. code-block:: yaml
policies:
- name: mark-orphaned-enis
comment: Flag abandoned Lambda VPC ENIs for deletion
resource: eni
filters:
- Status: available
- type: value
op: glob
key: Description
value: "AWS Lambda VPC ENI*"
- "tag:custodian_status": absent
actions:
- type: mark-for-op
tag: custodian_status
msg: "Orphaned Lambda VPC ENI: {op}@{action_date}"
op: delete
days: 1
- name: delete-marked-enis
comment: Delete flagged ENIs that have not been cleaned up naturally
resource: eni
filters:
- type: marked-for-op
tag: custodian_status
op: delete
actions:
- type: delete
"""
permissions = ('ec2:DeleteNetworkInterface',)
schema = type_schema('delete')
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
try:
self.manager.retry(
client.delete_network_interface,
NetworkInterfaceId=r['NetworkInterfaceId'])
except ClientError as err:
if not err.response['Error']['Code'] == 'InvalidNetworkInterfaceID.NotFound':
raise
@resources.register('route-table')
class RouteTable(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'route-table'
enum_spec = ('describe_route_tables', 'RouteTables', None)
name = id = 'RouteTableId'
filter_name = 'RouteTableIds'
filter_type = 'list'
id_prefix = "rtb-"
cfn_type = config_type = "AWS::EC2::RouteTable"
@RouteTable.filter_registry.register('vpc')
class RouteTableVpcFilter(net_filters.VpcFilter):
RelatedIdsExpression = "VpcId"
@RouteTable.filter_registry.register('subnet')
class SubnetRoute(net_filters.SubnetFilter):
"""Filter a route table by its associated subnet attributes."""
RelatedIdsExpression = "Associations[].SubnetId"
    RelatedIdMapping = None
def get_related_ids(self, resources):
if self.RelatedIdMapping is None:
return super(SubnetRoute, self).get_related_ids(resources)
return list(itertools.chain(*[self.RelatedIdMapping[r['RouteTableId']] for r in resources]))
def get_related(self, resources):
rt_subnet_map = {}
main_tables = {}
manager = self.get_resource_manager()
for r in resources:
rt_subnet_map[r['RouteTableId']] = []
for a in r.get('Associations', ()):
if 'SubnetId' in a:
rt_subnet_map[r['RouteTableId']].append(a['SubnetId'])
elif a.get('Main'):
main_tables[r['VpcId']] = r['RouteTableId']
explicit_subnet_ids = set(itertools.chain(*rt_subnet_map.values()))
subnets = manager.resources()
for s in subnets:
if s['SubnetId'] in explicit_subnet_ids:
continue
if s['VpcId'] not in main_tables:
continue
rt_subnet_map.setdefault(main_tables[s['VpcId']], []).append(s['SubnetId'])
related_subnets = set(itertools.chain(*rt_subnet_map.values()))
self.RelatedIdMapping = rt_subnet_map
return {s['SubnetId']: s for s in subnets if s['SubnetId'] in related_subnets}
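    # Illustration (not from the original source): given a VPC whose main route
    # table is rtb-main and a second table rtb-a explicitly associated with
    # subnet-a, get_related() maps rtb-a -> [subnet-a], while subnets with no
    # explicit association are attributed to that VPC's main route table (when
    # it is among the filtered tables), mirroring the EC2 main-table fallback.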
@RouteTable.filter_registry.register('route')
class Route(ValueFilter):
"""Filter a route table by its routes' attributes."""
schema = type_schema('route', rinherit=ValueFilter.schema)
schema_alias = False
def process(self, resources, event=None):
results = []
for r in resources:
matched = []
for route in r['Routes']:
if self.match(route):
matched.append(route)
if matched:
r.setdefault('c7n:matched-routes', []).extend(matched)
results.append(r)
return results
@resources.register('transit-gateway')
class TransitGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
enum_spec = ('describe_transit_gateways', 'TransitGateways', None)
name = id = 'TransitGatewayId'
arn = "TransitGatewayArn"
id_prefix = "tgw-"
filter_name = 'TransitGatewayIds'
filter_type = 'list'
cfn_type = 'AWS::EC2::TransitGateway'
class TransitGatewayAttachmentQuery(query.ChildResourceQuery):
def get_parent_parameters(self, params, parent_id, parent_key):
merged_params = dict(params)
merged_params.setdefault('Filters', []).append(
{'Name': parent_key, 'Values': [parent_id]})
return merged_params
@query.sources.register('transit-attachment')
class TransitAttachmentSource(query.ChildDescribeSource):
resource_query_factory = TransitGatewayAttachmentQuery
@resources.register('transit-attachment')
class TransitGatewayAttachment(query.ChildResourceManager):
child_source = 'transit-attachment'
class resource_type(query.TypeInfo):
service = 'ec2'
enum_spec = ('describe_transit_gateway_attachments', 'TransitGatewayAttachments', None)
parent_spec = ('transit-gateway', 'transit-gateway-id', None)
id_prefix = 'tgw-attach-'
name = id = 'TransitGatewayAttachmentId'
arn = False
cfn_type = 'AWS::EC2::TransitGatewayAttachment'
@resources.register('peering-connection')
class PeeringConnection(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'vpc-peering-connection'
enum_spec = ('describe_vpc_peering_connections',
'VpcPeeringConnections', None)
name = id = 'VpcPeeringConnectionId'
filter_name = 'VpcPeeringConnectionIds'
filter_type = 'list'
id_prefix = "pcx-"
cfn_type = config_type = "AWS::EC2::VPCPeeringConnection"
@PeeringConnection.filter_registry.register('cross-account')
class CrossAccountPeer(CrossAccountAccessFilter):
schema = type_schema(
'cross-account',
# white list accounts
whitelist_from=resolver.ValuesFrom.schema,
whitelist={'type': 'array', 'items': {'type': 'string'}})
permissions = ('ec2:DescribeVpcPeeringConnections',)
def process(self, resources, event=None):
results = []
accounts = self.get_accounts()
        # materialize as a list; a map() iterator would be exhausted after the first resource
        owners = [jmespath.compile(expr) for expr in (
            'AccepterVpcInfo.OwnerId', 'RequesterVpcInfo.OwnerId')]
for r in resources:
for o_expr in owners:
account_id = o_expr.search(r)
if account_id and account_id not in accounts:
r.setdefault(
'c7n:CrossAccountViolations', []).append(account_id)
results.append(r)
return results
@PeeringConnection.filter_registry.register('missing-route')
class MissingRoute(Filter):
"""Return peers which are missing a route in route tables.
    If the peering connection is between two vpcs in the same account,
    the connection is returned unless a route to it is present in the
    route tables of each vpc.
If the peering connection is between accounts, then the local vpc's
route table is checked.
"""
schema = type_schema('missing-route')
permissions = ('ec2:DescribeRouteTables',)
def process(self, resources, event=None):
tables = self.manager.get_resource_manager(
'route-table').resources()
routed_vpcs = {}
mid = 'VpcPeeringConnectionId'
for t in tables:
for r in t.get('Routes', ()):
if mid in r:
routed_vpcs.setdefault(r[mid], []).append(t['VpcId'])
results = []
for r in resources:
if r[mid] not in routed_vpcs:
results.append(r)
continue
for k in ('AccepterVpcInfo', 'RequesterVpcInfo'):
if r[k]['OwnerId'] != self.manager.config.account_id:
continue
                if r[k].get('Region') and r[k]['Region'] != self.manager.config.region:
continue
if r[k]['VpcId'] not in routed_vpcs[r['VpcPeeringConnectionId']]:
results.append(r)
break
return results
@resources.register('network-acl')
class NetworkAcl(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'network-acl'
enum_spec = ('describe_network_acls', 'NetworkAcls', None)
name = id = 'NetworkAclId'
filter_name = 'NetworkAclIds'
filter_type = 'list'
cfn_type = config_type = "AWS::EC2::NetworkAcl"
id_prefix = "acl-"
@NetworkAcl.filter_registry.register('subnet')
class AclSubnetFilter(net_filters.SubnetFilter):
"""Filter network acls by the attributes of their attached subnets.
:example:
.. code-block:: yaml
policies:
- name: subnet-acl
resource: network-acl
filters:
- type: subnet
key: "tag:Location"
value: Public
"""
RelatedIdsExpression = "Associations[].SubnetId"
@NetworkAcl.filter_registry.register('s3-cidr')
class AclAwsS3Cidrs(Filter):
"""Filter network acls by those that allow access to s3 cidrs.
Defaults to filtering those nacls that do not allow s3 communication.
:example:
Find all nacls that do not allow communication with s3.
.. code-block:: yaml
policies:
- name: s3-not-allowed-nacl
resource: network-acl
filters:
- s3-cidr
"""
# TODO allow for port specification as range
schema = type_schema(
's3-cidr',
egress={'type': 'boolean', 'default': True},
ingress={'type': 'boolean', 'default': True},
present={'type': 'boolean', 'default': False})
permissions = ('ec2:DescribePrefixLists',)
def process(self, resources, event=None):
ec2 = local_session(self.manager.session_factory).client('ec2')
cidrs = jmespath.search(
"PrefixLists[].Cidrs[]", ec2.describe_prefix_lists())
cidrs = [parse_cidr(cidr) for cidr in cidrs]
results = []
check_egress = self.data.get('egress', True)
check_ingress = self.data.get('ingress', True)
present = self.data.get('present', False)
for r in resources:
matched = {cidr: None for cidr in cidrs}
for entry in r['Entries']:
if entry['Egress'] and not check_egress:
continue
if not entry['Egress'] and not check_ingress:
continue
entry_cidr = parse_cidr(entry['CidrBlock'])
for c in matched:
if c in entry_cidr and matched[c] is None:
                        matched[c] = entry['RuleAction'] == 'allow'
if present and all(matched.values()):
results.append(r)
elif not present and not all(matched.values()):
results.append(r)
return results
class DescribeElasticIp(query.DescribeSource):
def augment(self, resources):
return [r for r in resources if self.manager.resource_type.id in r]
@resources.register('elastic-ip', aliases=('network-addr',))
class NetworkAddress(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'eip-allocation'
enum_spec = ('describe_addresses', 'Addresses', None)
name = 'PublicIp'
id = 'AllocationId'
id_prefix = 'eipalloc-'
filter_name = 'AllocationIds'
filter_type = 'list'
config_type = "AWS::EC2::EIP"
source_mapping = {
'describe': DescribeElasticIp,
'config': query.ConfigSource
}
NetworkAddress.filter_registry.register('shield-enabled', IsShieldProtected)
NetworkAddress.action_registry.register('set-shield', SetShieldProtection)
@NetworkAddress.action_registry.register('release')
class AddressRelease(BaseAction):
"""Action to release elastic IP address(es)
Use the force option to cause any attached elastic IPs to
also be released. Otherwise, only unattached elastic IPs
will be released.
:example:
.. code-block:: yaml
policies:
- name: release-network-addr
resource: network-addr
filters:
- AllocationId: ...
actions:
- type: release
force: True
"""
schema = type_schema('release', force={'type': 'boolean'})
permissions = ('ec2:ReleaseAddress', 'ec2:DisassociateAddress',)
def process_attached(self, client, associated_addrs):
for aa in list(associated_addrs):
try:
client.disassociate_address(AssociationId=aa['AssociationId'])
except ClientError as e:
                # If it's already been disassociated, ignore; else raise.
                if not (e.response['Error']['Code'] == 'InvalidAssociationID.NotFound' and
                        aa['AssociationId'] in e.response['Error']['Message']):
raise e
associated_addrs.remove(aa)
return associated_addrs
def process(self, network_addrs):
client = local_session(self.manager.session_factory).client('ec2')
force = self.data.get('force')
assoc_addrs = [addr for addr in network_addrs if 'AssociationId' in addr]
unassoc_addrs = [addr for addr in network_addrs if 'AssociationId' not in addr]
if len(assoc_addrs) and not force:
self.log.warning(
"Filtered %d attached eips of %d eips. Use 'force: true' to release them.",
len(assoc_addrs), len(network_addrs))
elif len(assoc_addrs) and force:
unassoc_addrs = itertools.chain(
unassoc_addrs, self.process_attached(client, assoc_addrs))
for r in unassoc_addrs:
try:
client.release_address(AllocationId=r['AllocationId'])
except ClientError as e:
                # If it's already been released, ignore; else raise.
if e.response['Error']['Code'] != 'InvalidAllocationID.NotFound':
raise
@resources.register('customer-gateway')
class CustomerGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'customer-gateway'
enum_spec = ('describe_customer_gateways', 'CustomerGateways', None)
id = 'CustomerGatewayId'
filter_name = 'CustomerGatewayIds'
filter_type = 'list'
name = 'CustomerGatewayId'
id_prefix = "cgw-"
cfn_type = config_type = 'AWS::EC2::CustomerGateway'
@resources.register('internet-gateway')
class InternetGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'internet-gateway'
enum_spec = ('describe_internet_gateways', 'InternetGateways', None)
name = id = 'InternetGatewayId'
filter_name = 'InternetGatewayIds'
filter_type = 'list'
cfn_type = config_type = "AWS::EC2::InternetGateway"
id_prefix = "igw-"
@InternetGateway.action_registry.register('delete')
class DeleteInternetGateway(BaseAction):
"""Action to delete Internet Gateway
:example:
.. code-block:: yaml
policies:
- name: delete-internet-gateway
resource: internet-gateway
actions:
- type: delete
"""
schema = type_schema('delete')
permissions = ('ec2:DeleteInternetGateway',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
try:
client.delete_internet_gateway(InternetGatewayId=r['InternetGatewayId'])
except ClientError as err:
if not err.response['Error']['Code'] == 'InvalidInternetGatewayId.NotFound':
raise
@resources.register('nat-gateway')
class NATGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'nat-gateway'
enum_spec = ('describe_nat_gateways', 'NatGateways', None)
name = id = 'NatGatewayId'
filter_name = 'NatGatewayIds'
filter_type = 'list'
date = 'CreateTime'
dimension = 'NatGatewayId'
metrics_namespace = 'AWS/NATGateway'
id_prefix = "nat-"
cfn_type = config_type = 'AWS::EC2::NatGateway'
@NATGateway.action_registry.register('delete')
class DeleteNATGateway(BaseAction):
schema = type_schema('delete')
permissions = ('ec2:DeleteNatGateway',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
client.delete_nat_gateway(NatGatewayId=r['NatGatewayId'])
@resources.register('vpn-connection')
class VPNConnection(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
        arn_type = 'vpn-connection'
enum_spec = ('describe_vpn_connections', 'VpnConnections', None)
name = id = 'VpnConnectionId'
filter_name = 'VpnConnectionIds'
filter_type = 'list'
cfn_type = config_type = 'AWS::EC2::VPNConnection'
id_prefix = "vpn-"
@resources.register('vpn-gateway')
class VPNGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
        arn_type = 'vpn-gateway'
enum_spec = ('describe_vpn_gateways', 'VpnGateways', None)
name = id = 'VpnGatewayId'
filter_name = 'VpnGatewayIds'
filter_type = 'list'
cfn_type = config_type = 'AWS::EC2::VPNGateway'
id_prefix = "vgw-"
@resources.register('vpc-endpoint')
class VpcEndpoint(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'vpc-endpoint'
enum_spec = ('describe_vpc_endpoints', 'VpcEndpoints', None)
name = id = 'VpcEndpointId'
date = 'CreationTimestamp'
filter_name = 'VpcEndpointIds'
filter_type = 'list'
id_prefix = "vpce-"
universal_taggable = object()
cfn_type = config_type = "AWS::EC2::VPCEndpoint"
@VpcEndpoint.filter_registry.register('cross-account')
class EndpointCrossAccountFilter(CrossAccountAccessFilter):
policy_attribute = 'PolicyDocument'
annotation_key = 'c7n:CrossAccountViolations'
permissions = ('ec2:DescribeVpcEndpoints',)
@VpcEndpoint.filter_registry.register('security-group')
class EndpointSecurityGroupFilter(net_filters.SecurityGroupFilter):
RelatedIdsExpression = "Groups[].GroupId"
@VpcEndpoint.filter_registry.register('subnet')
class EndpointSubnetFilter(net_filters.SubnetFilter):
RelatedIdsExpression = "SubnetIds[]"
@VpcEndpoint.filter_registry.register('vpc')
class EndpointVpcFilter(net_filters.VpcFilter):
RelatedIdsExpression = "VpcId"
@Vpc.filter_registry.register("vpc-endpoint")
class VPCEndpointFilter(RelatedResourceByIdFilter):
"""Filters vpcs based on their vpc-endpoints
:example:
.. code-block:: yaml
policies:
- name: s3-vpc-endpoint-enabled
resource: vpc
filters:
- type: vpc-endpoint
key: ServiceName
value: com.amazonaws.us-east-1.s3
"""
RelatedResource = "c7n.resources.vpc.VpcEndpoint"
RelatedIdsExpression = "VpcId"
AnnotationKey = "matched-vpc-endpoint"
schema = type_schema(
'vpc-endpoint',
rinherit=ValueFilter.schema)
@Subnet.filter_registry.register("vpc-endpoint")
class SubnetEndpointFilter(RelatedResourceByIdFilter):
"""Filters subnets based on their vpc-endpoints
:example:
.. code-block:: yaml
policies:
- name: athena-endpoint-enabled
resource: subnet
filters:
- type: vpc-endpoint
key: ServiceName
value: com.amazonaws.us-east-1.athena
"""
RelatedResource = "c7n.resources.vpc.VpcEndpoint"
RelatedIdsExpression = "SubnetId"
RelatedResourceByIdExpression = "SubnetIds"
AnnotationKey = "matched-vpc-endpoint"
schema = type_schema(
'vpc-endpoint',
rinherit=ValueFilter.schema)
@resources.register('key-pair')
class KeyPair(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'key-pair'
enum_spec = ('describe_key_pairs', 'KeyPairs', None)
name = 'KeyName'
id = 'KeyPairId'
id_prefix = 'key-'
filter_name = 'KeyNames'
filter_type = 'list'
@KeyPair.filter_registry.register('unused')
class UnusedKeyPairs(Filter):
"""Filter for used or unused keys.
The default is unused but can be changed by using the state property.
:example:
.. code-block:: yaml
policies:
- name: unused-key-pairs
resource: aws.key-pair
filters:
- unused
- name: used-key-pairs
resource: aws.key-pair
filters:
- type: unused
state: false
"""
annotation_key = 'c7n:unused_keys'
permissions = ('ec2:DescribeKeyPairs',)
schema = type_schema('unused',
state={'type': 'boolean'})
def process(self, resources, event=None):
instances = self.manager.get_resource_manager('ec2').resources()
used = set(jmespath.search('[].KeyName', instances))
if self.data.get('state', True):
return [r for r in resources if r['KeyName'] not in used]
else:
return [r for r in resources if r['KeyName'] in used]
@KeyPair.action_registry.register('delete')
class DeleteUnusedKeyPairs(BaseAction):
"""Delete all ec2 keys that are not in use
This should always be used with the unused filter
and it will prevent you from using without it.
:example:
.. code-block:: yaml
policies:
- name: delete-unused-key-pairs
resource: aws.key-pair
filters:
- unused
actions:
- delete
"""
permissions = ('ec2:DeleteKeyPair',)
schema = type_schema('delete')
def validate(self):
if not [f for f in self.manager.iter_filters() if isinstance(f, UnusedKeyPairs)]:
raise PolicyValidationError(
"delete should be used in conjunction with the unused filter on %s" % (
self.manager.data,))
if [True for f in self.manager.iter_filters() if f.data.get('state') is False]:
raise PolicyValidationError(
"You policy has filtered used keys you should use this with unused keys %s" % (
self.manager.data,))
return self
def process(self, unused):
client = local_session(self.manager.session_factory).client('ec2')
for key in unused:
client.delete_key_pair(KeyPairId=key['KeyPairId'])
@Vpc.action_registry.register('set-flow-log')
@Subnet.action_registry.register('set-flow-log')
@NetworkInterface.action_registry.register('set-flow-log')
class CreateFlowLogs(BaseAction):
"""Create flow logs for a network resource
:example:
.. code-block:: yaml
policies:
- name: vpc-enable-flow-logs
resource: vpc
filters:
- type: flow-logs
enabled: false
actions:
- type: set-flow-log
DeliverLogsPermissionArn: arn:iam:role
LogGroupName: /custodian/vpc/flowlogs/
"""
permissions = ('ec2:CreateFlowLogs', 'logs:CreateLogGroup',)
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'type': {'enum': ['set-flow-log']},
'state': {'type': 'boolean'},
'DeliverLogsPermissionArn': {'type': 'string'},
'LogGroupName': {'type': 'string'},
'LogDestination': {'type': 'string'},
'LogFormat': {'type': 'string'},
'MaxAggregationInterval': {'type': 'integer'},
'LogDestinationType': {'enum': ['s3', 'cloud-watch-logs']},
'TrafficType': {
'type': 'string',
'enum': ['ACCEPT', 'REJECT', 'ALL']
}
}
}
RESOURCE_ALIAS = {
'vpc': 'VPC',
'subnet': 'Subnet',
'eni': 'NetworkInterface'
}
SchemaValidation = {
's3': {
'required': ['LogDestination'],
'absent': ['LogGroupName', 'DeliverLogsPermissionArn']
},
'cloud-watch-logs': {
'required': ['DeliverLogsPermissionArn'],
'one-of': ['LogGroupName', 'LogDestination'],
}
}
def validate(self):
self.state = self.data.get('state', True)
if not self.state:
return
destination_type = self.data.get(
'LogDestinationType', 'cloud-watch-logs')
dvalidation = self.SchemaValidation[destination_type]
for r in dvalidation.get('required', ()):
if not self.data.get(r):
raise PolicyValidationError(
'Required %s missing for destination-type:%s' % (
r, destination_type))
for r in dvalidation.get('absent', ()):
if r in self.data:
raise PolicyValidationError(
'%s is prohibited for destination-type:%s' % (
r, destination_type))
if ('one-of' in dvalidation and
sum([1 for k in dvalidation['one-of'] if k in self.data]) != 1):
raise PolicyValidationError(
"Destination:%s Exactly one of %s required" % (
destination_type, ", ".join(dvalidation['one-of'])))
return self
def delete_flow_logs(self, client, rids):
flow_logs = client.describe_flow_logs(
Filters=[{'Name': 'resource-id', 'Values': rids}])['FlowLogs']
try:
results = client.delete_flow_logs(
FlowLogIds=[f['FlowLogId'] for f in flow_logs])
for r in results['Unsuccessful']:
self.log.exception(
                    'Exception: delete flow-log for %s: %s',
r['ResourceId'], r['Error']['Message'])
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidParameterValue':
self.log.exception(
'delete flow-log: %s', e.response['Error']['Message'])
else:
raise
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
params = dict(self.data)
params.pop('type')
if self.data.get('state'):
params.pop('state')
model = self.manager.get_model()
params['ResourceIds'] = [r[model.id] for r in resources]
if not self.state:
self.delete_flow_logs(client, params['ResourceIds'])
return
params['ResourceType'] = self.RESOURCE_ALIAS[model.arn_type]
params['TrafficType'] = self.data.get('TrafficType', 'ALL').upper()
params['MaxAggregationInterval'] = self.data.get('MaxAggregationInterval', 600)
if self.data.get('LogDestinationType', 'cloud-watch-logs') == 'cloud-watch-logs':
self.process_log_group(self.data.get('LogGroupName'))
try:
results = client.create_flow_logs(**params)
for r in results['Unsuccessful']:
self.log.exception(
'Exception: create flow-log for %s: %s',
r['ResourceId'], r['Error']['Message'])
except ClientError as e:
if e.response['Error']['Code'] == 'FlowLogAlreadyExists':
self.log.exception(
'Exception: create flow-log: %s',
e.response['Error']['Message'])
else:
raise
def process_log_group(self, logroup):
client = local_session(self.manager.session_factory).client('logs')
try:
client.create_log_group(logGroupName=logroup)
except client.exceptions.ResourceAlreadyExistsException:
pass
|
the-stack_0_10318 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Copyright (c) 2019 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
NodeConn: an object which manages p2p connectivity to a bitcoin node
NodeConnCB: a base class that describes the interface for receiving
callbacks with network messages from a NodeConn
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization
"""
import asyncore
import binascii
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from contextlib import contextmanager
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from itertools import chain
from threading import RLock, Thread
import uuid
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until
from test_framework.streams import StreamType
BIP0031_VERSION = 60000
MY_VERSION = 70015 # INVALID_CB_NO_BAN_VERSION
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
# from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MY_RELAY = 1
MAX_INV_SZ = 50000
MAX_PROTOCOL_RECV_PAYLOAD_LENGTH = 2 * 1024 * 1024
LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH = 1 * 1024 * 1024
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_XTHIN = (1 << 4)
NODE_BITCOIN_CASH = (1 << 5)
# How much data will be read from the network at once
READ_BUFFER_SIZE = 8192
logger = logging.getLogger("TestFramework.mininode")
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to work around an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Lock used to synchronize access to data required by loop running in NetworkThread.
# It must be locked, for example, when adding new NodeConn object, otherwise loop in
# NetworkThread may try to access partially constructed object.
network_thread_loop_lock = RLock()
# Network thread acquires network_thread_loop_lock at start of each iteration and releases
# it at the end. Since the next iteration is run immediately after that, lock is acquired
# almost all of the time making it difficult for other threads to also acquire this lock.
# To work around this problem, NetworkThread first acquires network_thread_loop_intent_lock
# and immediately releases it before acquiring network_thread_loop_lock.
# Other threads (e.g. the ones calling NodeConn constructor) acquire both locks before
# proceeding. The end result is that other threads wait at most one iteration of loop in
# NetworkThread.
network_thread_loop_intent_lock = RLock()
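# A minimal sketch (not part of the original module) of the locking order the
# comments above describe.  The network thread briefly takes the intent lock and
# releases it before holding the loop lock for one iteration; other threads
# acquire both locks (intent first), so they wait at most one iteration.
def _network_thread_iteration_sketch():
    with network_thread_loop_intent_lock:
        pass  # taken and released immediately, letting waiting threads go first
    with network_thread_loop_lock:
        pass  # one asyncore loop iteration runs while this lock is held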
# ports used by chain type
NETWORK_PORTS = {
"mainnet" : 8333,
"testnet3" : 18333,
"stn" : 9333,
"regtest" : 18444
}
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def generator_based_serializator(fn):
def decorated(object_collection, *args, **kwargs):
first_elem = ser_compact_size(len(object_collection))
obj_generator = fn(object_collection, *args, **kwargs)
return b"".join(chain((first_elem,), obj_generator))
return decorated
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
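# Worked example (illustrative, not part of the original module): CompactSize
# uses 1 byte for values < 253 and a marker byte plus a fixed-width integer
# otherwise, e.g.
#   ser_compact_size(252)     == b'\xfc'
#   ser_compact_size(253)     == b'\xfd\xfd\x00'         (marker 0xfd + uint16)
#   ser_compact_size(0x10000) == b'\xfe\x00\x00\x01\x00' (marker 0xfe + uint32)
# deser_compact_size() reverses each of these round trips.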
def ser_varint(v):
r = b""
length = 0
while True:
r += struct.pack("<B", (v & 0x7F) | (0x80 if length > 0 else 0x00))
if(v <= 0x7F):
return r[::-1] # Need as little-endian
v = (v >> 7) - 1
length += 1
def deser_varint(f):
    ntot = 0
    while True:
        n = struct.unpack("<B", f.read(1))[0]
        ntot = (ntot << 7) | (n & 0x7F)
        if (n & 0x80) == 0:
            return ntot
        ntot += 1
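# Worked example (illustrative, not part of the original module): this is the
# Bitcoin serialized VarInt, 7 bits per byte with a continuation flag and an
# implicit +1 per continuation byte, e.g.
#   ser_varint(0)   == b'\x00'
#   ser_varint(127) == b'\x7f'
#   ser_varint(128) == b'\x80\x00'
#   ser_varint(300) == b'\x81\x2c'
# and deser_varint() inverts each of these.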
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
@generator_based_serializator
def ser_string(s):
return (s,) # return tuple with single member
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
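# Illustrative note (not part of the original module): uint256 values are
# serialized as eight little-endian 32-bit words, i.e. 32 bytes least
# significant byte first, e.g. ser_uint256(1) == b'\x01' + b'\x00' * 31,
# and deser_uint256()/uint256_from_str() invert that layout.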
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
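# Worked example (illustrative, not part of the original module): the compact
# "nBits" form stores a byte length and a 3-byte mantissa.  For the familiar
# difficulty-1 value 0x1d00ffff, nbytes == 0x1d == 29, so
#   uint256_from_compact(0x1d00ffff) == 0xffff << (8 * 26)
# which is the target 0x00000000ffff0000...0000 (52 trailing hex zeros).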
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector.
@generator_based_serializator
def ser_vector(l, ser_function_name=""):
# using generator because of need for lazy evaluation
return (getattr(i, ser_function_name, i.serialize )() for i in l)
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
@generator_based_serializator
def ser_uint256_vector(l):
return (ser_uint256(i) for i in l)
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
@generator_based_serializator
def ser_string_vector(l):
return (ser_string(sv) for sv in l)
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
@generator_based_serializator
def ser_int_vector(l):
return (struct.pack("<i", i) for i in l)
def deser_varint_vector(f):
nit = deser_varint(f)
r = []
for i in range(nit):
t = deser_varint(f)
r.append(t)
return r
def ser_varint_vector(l):
r = ser_varint(len(l))
for v in l:
r += ser_varint(v)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Serialise a UUID association ID as a stream of bytes for sending over the network
def serialise_uuid_associd(assocId):
assocIdBytes = bytes()
if(assocId):
assocIdPlusType = b"".join((
struct.pack("<B", 0),
assocId.bytes
))
assocIdBytes = ser_string(assocIdPlusType)
return assocIdBytes
# Deserialise an association ID from the network into a UUID
def deserialise_uuid_associd(raw):
return uuid.UUID(bytes=raw[1:])
# Create a new random association ID
def create_association_id():
return uuid.uuid4()
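# Illustrative round trip (not part of the original module): for a UUID aid,
# serialise_uuid_associd(aid) emits ser_string(b'\x00' + aid.bytes), i.e. an
# 18-byte field: length 0x11, a type byte 0x00, then the 16 UUID bytes.  The
# receiver reads the field back with deser_string() and passes the 17-byte
# payload to deserialise_uuid_associd(), recovering aid (the type byte is skipped).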
# Objects that map to bitcoind objects, which can be serialized/deserialized
# Before the version handshake completes, the remote peer's nVersion is unknown, so the protocol uses an older CAddress format (without nTime) inside the version message
# This class handles that old format
class CAddressInVersion(object):
def __init__(self, ip="0.0.0.0", port=0):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2 # ip is 16 bytes on wire to handle v6
self.ip = ip
self.port = port
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b"".join((
struct.pack("<Q", self.nServices),
self.pchReserved,
socket.inet_aton(self.ip),
struct.pack(">H", self.port),))
return r
def __repr__(self):
return "CAddressInVersion(nServices=%i ip=%s port=%i)" % (self.nServices, self.ip, self.port)
# Handle new-style CAddress objects (with nTime)
class CAddress():
def __init__(self, ip="0.0.0.0", port=0):
self.nServices = 1
self.nTime = int(time.time())
self.pchReserved = b"\x00" * 10 + b"\xff" * 2 # ip is 16 bytes on wire to handle v6
self.ip = ip
self.port = port
def deserialize(self, f):
self.nTime = struct.unpack("<L", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<L", self.nTime)
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i time=%d)" % (self.nServices, self.ip, self.port, self.nTime)
class CInv():
ERROR = 0
TX = 1
BLOCK = 2
COMPACT_BLOCK = 4
typemap = {
ERROR: "Error",
TX: "TX",
BLOCK: "Block",
COMPACT_BLOCK: "CompactBlock"
}
def __init__(self, t=ERROR, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b"".join((
struct.pack("<i", self.type),
ser_uint256(self.hash),))
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
def estimateMaxInvElements(max_payload_length=MAX_PROTOCOL_RECV_PAYLOAD_LENGTH):
return int((max_payload_length - 8) / (4 + 32))
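    # Illustrative arithmetic (not part of the original module): each inv entry
    # serializes to 4 (type) + 32 (hash) = 36 bytes, so with the default 2 MiB
    # receive limit this evaluates to (2 * 1024 * 1024 - 8) // 36 == 58254
    # entries per inv/getdata message.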
class CProtoconf():
def __init__(self, number_of_fields=2, max_recv_payload_length=0, stream_policies=b"Default"):
self.number_of_fields = number_of_fields
self.max_recv_payload_length = max_recv_payload_length
self.stream_policies = stream_policies
def deserialize(self, f):
self.number_of_fields = deser_compact_size(f)
self.max_recv_payload_length = struct.unpack("<i", f.read(4))[0]
if self.number_of_fields > 1:
self.stream_policies = deser_string(f)
def serialize(self):
r = b""
r += ser_compact_size(self.number_of_fields)
r += struct.pack("<i", self.max_recv_payload_length)
if self.number_of_fields > 1:
r += ser_string(self.stream_policies)
return r
def __repr__(self):
return "CProtoconf(number_of_fields=%064x max_recv_payload_length=%064x stream_policies=%s)" \
% (self.number_of_fields, self.max_recv_payload_length, self.stream_policies)
class CBlockLocator():
def __init__(self, have=[]):
self.nVersion = MY_VERSION
self.vHave = have
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b"".join((
struct.pack("<i", self.nVersion),
ser_uint256_vector(self.vHave),))
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b"".join((
ser_uint256(self.hash),
struct.pack("<I", self.n),))
return r
def __hash__(self):
return self.hash + self.n
def __eq__(self, other):
return self.n == other.n and self.hash == other.hash
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b"".join((
self.prevout.serialize(),
ser_string(self.scriptSig),
struct.pack("<I", self.nSequence),))
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b"".join((
struct.pack("<q", self.nValue),
ser_string(self.scriptPubKey),))
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b"".join((
struct.pack("<i", self.nVersion),
ser_vector(self.vin),
ser_vector(self.vout),
struct.pack("<I", self.nLockTime),))
return r
# Recalculate the txid
def rehash(self):
self.sha256 = None
self.calc_sha256()
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = encode(
hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
self.rehash()
return "CTransaction(hash=%s nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.hash, self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None, json_notification=None):
if json_notification is None:
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
else:
self.nVersion = json_notification["version"]
self.hashPrevBlock = uint256_from_str(hex_str_to_bytes(json_notification["hashPrevBlock"])[::-1])
self.hashMerkleRoot = uint256_from_str(hex_str_to_bytes(json_notification["hashMerkleRoot"])[::-1])
self.nTime = json_notification["time"]
self.nBits = json_notification["bits"]
self.nNonce = json_notification["nonce"]
self.rehash()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b"".join((
struct.pack("<i", self.nVersion),
ser_uint256(self.hashPrevBlock),
ser_uint256(self.hashMerkleRoot),
struct.pack("<I", self.nTime),
struct.pack("<I", self.nBits),
struct.pack("<I", self.nNonce),))
return r
def calc_sha256(self):
if self.sha256 is None:
r = b"".join((
struct.pack("<i", self.nVersion),
ser_uint256(self.hashPrevBlock),
ser_uint256(self.hashMerkleRoot),
struct.pack("<I", self.nTime),
struct.pack("<I", self.nBits),
struct.pack("<I", self.nNonce),))
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
self.rehash()
return "CBlockHeader(hash=%s nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.hash, self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = b"".join((
super(CBlock, self).serialize(),
ser_vector(self.vtx),))
return r
# Calculate the merkle root given a vector of transaction hashes
def get_merkle_root(self, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i + 1, len(hashes) - 1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
self.rehash()
return "CBlock(hash=%s nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.hash, self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert():
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b"".join((
struct.pack("<i", self.nVersion),
struct.pack("<q", self.nRelayUntil),
struct.pack("<q", self.nExpiration),
struct.pack("<i", self.nID),
struct.pack("<i", self.nCancel),
ser_int_vector(self.setCancel),
struct.pack("<i", self.nMinVer),
struct.pack("<i", self.nMaxVer),
ser_string_vector(self.setSubVer),
struct.pack("<i", self.nPriority),
ser_string(self.strComment),
ser_string(self.strStatusBar),
ser_string(self.strReserved),))
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert():
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b"".join((
ser_string(self.vchMsg),
ser_string(self.vchSig),))
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction():
def __init__(self, index=0, tx=None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self):
r = b"".join((
ser_compact_size(self.index),
self.tx.serialize(),))
return r
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(
struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
def serialize(self):
r = b"".join((
self.header.serialize(),
struct.pack("<Q", self.nonce),
ser_compact_size(self.shortids_length),
b"".join( struct.pack("<Q", x)[0:6] for x in self.shortids), # We only want the first 6 bytes
ser_vector(self.prefilled_txn),))
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids=None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(
PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(
PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [key0, key1]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list=[0]):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [PrefilledTransaction(i, block.vtx[i])
for i in prefill_list]
self.shortids = []
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
# callback message for dsnt-enabled transactions
class CallbackMessage():
# 127.0.0.1 as network-order bytes
LOCAL_HOST_IP = 0x7F000001
MAX_INT64 = 0xFFFFFFFFFFFFFFFF
IPv6_version = 129
IPv4_version = 1
def __init__(self, version=1, ip_addresses=[LOCAL_HOST_IP], inputs=[0]):
self.version = version
self.ip_addresses = ip_addresses
self.ip_address_count = len(ip_addresses)
self.inputs = inputs
def ser_addrs(self, addrs):
rs = b""
for addr in addrs:
if (self.version == self.IPv6_version):
rs += struct.pack('>QQ', (addr >> 64) & self.MAX_INT64, addr & self.MAX_INT64)
else:
rs += struct.pack("!I", addr)
return rs
def deser_addrs(self, f):
addrs = []
for i in range(self.ip_address_count):
if (self.version == self.IPv6_version):
a, b = struct.unpack('>QQ', f.read(16))
unpacked = (a << 64) | b
addrs.append(unpacked)
else:
addrs.append(struct.unpack("!I", f.read(4))[0])
return addrs
def deserialize(self, f):
self.version = struct.unpack("<B", f.read(1))[0]
self.ip_address_count = deser_compact_size(f)
self.ip_addresses = self.deser_addrs(f)
self.inputs = deser_varint_vector(f)
def serialize(self):
r = b""
r += struct.pack("<B", self.version)
r += ser_compact_size(self.ip_address_count)
r += self.ser_addrs(self.ip_addresses)
r += ser_varint_vector(self.inputs)
return r
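# Worked example (illustrative, not part of the original module): with the
# defaults above, CallbackMessage().serialize() yields
#   b'\x01'             version 1
#   b'\x01'             one IP address (CompactSize count)
#   b'\x7f\x00\x00\x01' 127.0.0.1 in network byte order
#   b'\x01\x00'         VarInt vector with one element, input index 0
# i.e. 8 bytes in total; deserialize() reverses the same layout.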
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes=None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b"".join((
ser_uint256(self.blockhash),
ser_compact_size(len(self.indexes)),
b"".join(ser_compact_size(x) for x in self.indexes)))
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x - last_index - 1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x + last_index + 1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions=None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self):
r = b"".join((
ser_uint256(self.blockhash),
ser_vector(self.transactions),))
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddressInVersion()
self.addrFrom = CAddressInVersion()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
self.assocID = create_association_id()
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddressInVersion()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddressInVersion()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
try:
uuidBytes = deser_string(f)
self.assocID = deserialise_uuid_associd(uuidBytes)
except:
self.assocID = None
except:
self.nRelay = 0
else:
self.nRelay = 0
self.assocID = None
def serialize(self):
r = b"".join((
struct.pack("<i", self.nVersion),
struct.pack("<Q", self.nServices),
struct.pack("<q", self.nTime),
self.addrTo.serialize(),
self.addrFrom.serialize(),
struct.pack("<Q", self.nNonce),
ser_string(self.strSubVer),
struct.pack("<i", self.nStartingHeight),
struct.pack("<b", self.nRelay),
serialise_uuid_associd(self.assocID),
))
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i assocID=%s)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay, str(self.assocID))
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_createstream():
command = b"createstrm"
def __init__(self, stream_type, stream_policy=b"", assocID=None):
self.assocID = assocID
self.stream_type = stream_type
self.stream_policy = stream_policy
def deserialize(self, f):
uuidBytes = deser_string(f)
self.assocID = deserialise_uuid_associd(uuidBytes)
self.stream_type = struct.unpack("<B", f.read(1))[0]
self.stream_policy = deser_string(f)
def serialize(self):
return b"".join((
serialise_uuid_associd(self.assocID),
struct.pack("<B", self.stream_type),
ser_string(self.stream_policy),
))
def __repr__(self):
return "msg_createstream(assocID=%s stream_type=%i stream_policy=%s)" % (str(self.assocID), self.stream_type,
str(self.stream_policy))
class msg_streamack():
command = b"streamack"
def __init__(self, assocID=None, stream_type=StreamType.UNKNOWN.value):
self.assocID = assocID
self.stream_type = stream_type
def deserialize(self, f):
uuidBytes = deser_string(f)
self.assocID = deserialise_uuid_associd(uuidBytes)
self.stream_type = struct.unpack("<B", f.read(1))[0]
def serialize(self):
return b"".join((
serialise_uuid_associd(self.assocID),
struct.pack("<B", self.stream_type),
))
def __repr__(self):
return "msg_streamack(assocID=%s stream_type=%i)" % (str(self.assocID), self.stream_type)
class msg_protoconf():
command = b"protoconf"
def __init__(self, protoconf=None):
if protoconf is None:
self.protoconf = CProtoconf(2,0,b"")
else:
self.protoconf = protoconf
def deserialize(self, f):
        self.protoconf.deserialize(f)
def serialize(self):
r = b""
r += self.protoconf.serialize()
return r
def __repr__(self):
return "msg_protoconf(protoconf=%s)" % (repr(self.protoconf))
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert():
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
return self.alert.serialize()
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b"".join((
self.locator.serialize(),
ser_uint256(self.hashstop),))
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
    def __init__(self, tx=None):
        # Avoid a shared mutable default: each msg_tx gets its own CTransaction.
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
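# Illustrative sketch (not part of the original framework): msg_generic lets a test
# push an arbitrary command name and raw payload onto the wire. The command name and
# payload below are made-up placeholder values.
def _example_raw_message():
    # NodeConn.send_message() NUL-pads the command to 12 bytes and prepends the header.
    return msg_generic(b"oddcmd", data=b"\x00" * 8)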
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31():
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
return struct.pack("<Q", self.nonce)
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
return struct.pack("<Q", self.nonce)
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
    def __init__(self, locator_have=None):
        # Avoid a shared mutable default list across msg_getheaders instances.
        self.locator = CBlockLocator(locator_have if locator_have is not None else [])
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b"".join((
self.locator.serialize(),
ser_uint256(self.hashstop),))
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
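# Small usage sketch of the structure described above: request headers following a
# known locator hash, leaving hashstop at 0 to receive as many headers as possible.
# The locator value is a placeholder, not a real block hash.
def _example_getheaders(known_hash=0):
    msg = msg_getheaders(locator_have=[known_hash])
    msg.hashstop = 0
    return msg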
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
        self.headers = [CBlockHeader(x) for x in blocks]
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self, message=b"", code=0, reason=b"", data=0):
self.message = message
self.code = code
self.reason = reason
self.data = data
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
return struct.pack("<Q", self.feerate)
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self, announce=False):
self.announce = announce
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b"".join((
struct.pack("<?", self.announce),
struct.pack("<Q", self.version),))
return r
def __repr__(self):
        return "msg_sendcmpct(announce=%s, version=%i)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids=None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
return self.header_and_shortids.serialize()
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
return self.block_txn_request.serialize()
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
return self.block_transactions.serialize()
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_notfound():
command = b"notfound"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_notfound(inv=%s)" % (repr(self.inv))
# Data for the merkle proof node part of the double-spend detected P2P message
class MerkleProofNode():
def __init__(self, node=0):
self.nodeType = 0
self.node = node
def deserialize(self, f):
self.nodeType = struct.unpack("<B", f.read(1))[0]
# Currently only type 0 is supported (it means node is always uint256)
assert(self.nodeType == 0)
self.node = deser_uint256(f)
def serialize(self):
r = b"".join((
struct.pack("<B", self.nodeType),
ser_uint256(self.node),))
return r
def __repr__(self):
return "MerkleProofNode(type=%i node=%064x)" % (self.nodeType, self.node)
# Data for the merkle proof part of the double-spend detected P2P message
class DSMerkleProof():
    def __init__(self, txIndex=0, tx=None, merkleRoot=0, proof=None, json_notification=None):
if json_notification is None:
self.txIndex = txIndex
            self.tx = tx if tx is not None else CTransaction()
self.merkleRoot = merkleRoot
if proof is None:
self.proof = []
else:
self.proof = proof
else:
self.txIndex = json_notification["index"]
self.tx = FromHex(CTransaction(), json_notification["txOrId"])
# Only merkleRoot target type is currently supported
assert(json_notification["targetType"] == "merkleRoot")
self.merkleRoot = uint256_from_str(hex_str_to_bytes(json_notification["target"])[::-1])
self.proof = []
for node in json_notification["nodes"]:
self.proof.append(MerkleProofNode(uint256_from_str(hex_str_to_bytes(node)[::-1])))
def deserialize(self, f):
flags = struct.unpack("<B", f.read(1))[0]
# Should always be 5
assert(flags == 5)
self.txIndex = deser_compact_size(f)
# Length of transaction bytes is deserialized as required by the specification, but we don't actually need it to deserialize the transaction
deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
self.merkleRoot = deser_uint256(f)
self.proof = deser_vector(f, MerkleProofNode)
def serialize(self):
txSerialized = self.tx.serialize()
r = b"".join((
struct.pack("<B", 5),
ser_compact_size(self.txIndex),
ser_compact_size(len(txSerialized)),
txSerialized,
ser_uint256(self.merkleRoot),
ser_vector(self.proof),))
return r
def __repr__(self):
return "DSMerkleProof(txIndex=%i tx=%s merkleRoot=%064x proof=%s)" % (self.txIndex, repr(self.tx), self.merkleRoot, repr(self.proof))
# Data for the block details part of the double-spend detected P2P message
class BlockDetails():
    def __init__(self, blockHeaders=None, merkleProof=None, json_notification=None):
if json_notification is None:
if blockHeaders is None:
self.blockHeaders = []
else:
self.blockHeaders = blockHeaders
            self.merkleProof = merkleProof if merkleProof is not None else DSMerkleProof()
else:
self.blockHeaders = []
for blockHeader in json_notification["headers"]:
self.blockHeaders.append(CBlockHeader(json_notification=blockHeader))
self.merkleProof = DSMerkleProof(json_notification=json_notification["merkleProof"])
def deserialize(self, f):
self.blockHeaders = deser_vector(f, CBlockHeader)
self.merkleProof = DSMerkleProof()
self.merkleProof.deserialize(f)
def serialize(self):
r = b"".join((
ser_vector(self.blockHeaders),
self.merkleProof.serialize(),))
return r
def __repr__(self):
return "BlockDetails(blockHeaders=%s merkleProof=%s)" % (repr(self.blockHeaders), repr(self.merkleProof))
# Double-spend detected P2P message
class msg_dsdetected():
command = b"dsdetected"
def __init__(self, version=1, blocksDetails=None, json_notification=None):
if (json_notification is None):
self.version = version
if blocksDetails is None:
self.blocksDetails = []
else:
self.blocksDetails = blocksDetails
else:
self.version = json_notification["version"]
self.blocksDetails = []
for json_blockDetails in json_notification["blocks"]:
self.blocksDetails.append(BlockDetails(json_notification=json_blockDetails))
def deserialize(self, f):
self.version = struct.unpack("<H", f.read(2))[0]
self.blocksDetails = deser_vector(f, BlockDetails)
def serialize(self):
r = b"".join((
struct.pack("<H", self.version),
ser_vector(self.blocksDetails),))
return r
def __repr__(self):
return "msg_dsdetected(version=%i blocksDetails=%s)" % (self.version, repr(self.blocksDetails))
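# Illustrative only: the JSON notification shape accepted by the json_notification
# constructors above, reconstructed from the keys they read. All values are
# placeholders and are not valid block or transaction data on their own.
_EXAMPLE_DSDETECTED_JSON = {
    "version": 1,
    "blocks": [{
        "headers": [],                    # list of block-header JSON objects
        "merkleProof": {
            "index": 0,
            "txOrId": "00" * 10,          # hex-encoded transaction (placeholder)
            "targetType": "merkleRoot",   # only merkleRoot is supported above
            "target": "00" * 32,          # hex merkle root (placeholder)
            "nodes": [],                  # hex hashes forming the proof branch
        },
    }],
}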
class NodeConnCB():
"""Callback and helper functions for P2P connection to a bitcoind node.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour.
"""
def __init__(self):
# Track whether we have a P2P connection open to the node
self.connected = False
self.connection = None
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.msg_timestamp = {}
self.last_message = {}
self.time_index = 0
self.msg_index = defaultdict(int)
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
# Message receiving methods
def deliver(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type.
Optionally waits for deliver_sleep_time before dispatching message.
"""
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
self.msg_timestamp[command] = time.time()
self.msg_index[command] = self.time_index
                self.time_index += 1
getattr(self, 'on_' + command)(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
raise
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self, conn):
self.connected = True
def on_close(self, conn):
self.connected = False
self.connection = None
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_block(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_headers(self, conn, message): pass
    def on_mempool(self, conn, message): pass
def on_pong(self, conn, message): pass
def on_reject(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_streamack(self, conn, message): pass
def on_protoconf(self, conn, message): pass
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
self.send_protoconf(conn)
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
def on_notfound(self, conn, message): pass
def send_protoconf(self, conn):
conn.send_message(msg_protoconf(CProtoconf(2, MAX_PROTOCOL_RECV_PAYLOAD_LENGTH, b"BlockPriority,Default")))
# Connection helper methods
def add_connection(self, conn):
self.connection = conn
def wait_for_disconnect(self, timeout=60):
def test_function(): return not self.connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def clear_messages(self):
with mininode_lock:
self.message_count.clear()
def wait_for_block(self, blockhash, timeout=60):
def test_function(): return self.last_message.get(
"block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
def test_function(): return self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
def test_function(): return self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60, check_interval=0.05):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError(
"wait_for_inv() will only verify the first inv object")
def test_function(): return self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock, check_interval=check_interval)
def wait_for_verack(self, timeout=60):
def test_function(): return self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_reject(self, timeout=60):
def test_function(): return self.message_count["reject"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_protoconf(self, timeout=60):
def test_function(): return self.message_count["protoconf"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_streamack(self, timeout=60):
def test_function(): return self.message_count["streamack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_message(self, message):
if self.connection:
self.connection.send_message(message)
else:
logger.error("Cannot send message. No connection to node!")
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
# use ping to guarantee that previously sent p2p messages were processed
self.send_message(msg_ping(nonce=self.ping_counter))
def test_function():
if not self.last_message.get("pong"):
return False
if self.last_message["pong"].nonce != self.ping_counter:
return False
# after we receive pong we need to check that there are no async
# block/transaction processes still running
activity = self.connection.rpc.getblockchainactivity()
return sum(activity.values()) == 0
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
@contextmanager
def temporary_override_callback(self, **callbacks):
old_callbacks = {cb_name: getattr(self, cb_name) for cb_name in callbacks.keys()}
for cb_name, cb in callbacks.items():
setattr(self, cb_name, cb)
yield
for cb_name, cb in old_callbacks.items():
setattr(self, cb_name, cb)
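# Minimal usage sketch (not part of the framework): subclass NodeConnCB and override
# the on_* handler you care about, deferring to the default behaviour afterwards.
# The class below simply counts advertised inventory items before requesting them.
class _ExampleInvCounter(NodeConnCB):
    def __init__(self):
        super().__init__()
        self.inv_items_seen = 0

    def on_inv(self, conn, message):
        self.inv_items_seen += len(message.inv)
        super().on_inv(conn, message)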
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"protoconf": msg_protoconf,
b"verack": msg_verack,
b"createstrm": msg_createstream,
b"streamack": msg_streamack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn,
b"notfound": msg_notfound
}
MAGIC_BYTES = {
"mainnet": b"\xe3\xe1\xf3\xe8",
"testnet3": b"\xf4\xe5\xf3\xf4",
"stn": b"\xfb\xce\xc4\xf9",
"regtest": b"\xda\xb5\xbf\xfa",
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True,
strSubVer=None, assocID=None, nullAssocID=False):
# Lock must be acquired when new object is added to prevent NetworkThread from trying
# to access partially constructed object or trying to call callbacks before the connection
# is established.
with network_thread_loop_intent_lock, network_thread_loop_lock:
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = bytearray()
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
self.maxInvElements = CInv.estimateMaxInvElements(LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH)
self.strSubVer = strSubVer
self.assocID = assocID
if(assocID):
send_version = False
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
if(strSubVer):
vt.strSubVer = strSubVer
if(nullAssocID):
vt.assocID = None
self.send_message(vt, True)
self.assocID = vt.assocID
logger.info('Connecting to Bitcoin Node: %s:%d' %
(self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def handle_connect(self):
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" %
(self.dstaddr, self.dstport))
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
logger.debug("Closing connection to: %s:%d" %
(self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = bytearray()
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
with mininode_lock:
t = self.recv(READ_BUFFER_SIZE)
if len(t) > 0:
self.recvbuf += t
while True:
msg = self.got_data()
                    if msg is None:
break
self.got_message(msg)
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
del self.sendbuf[:sent]
def got_data(self):
try:
with mininode_lock:
if len(self.recvbuf) < 4:
return None
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return None
command = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0]
payloadlen = struct.unpack(
"<i", self.recvbuf[4 + 12:4 + 12 + 4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + payloadlen:
return None
msg = self.recvbuf[4 + 12 + 4:4 + 12 + 4 + payloadlen]
self.recvbuf = self.recvbuf[4 + 12 + 4 + payloadlen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return None
command = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0]
payloadlen = struct.unpack(
"<i", self.recvbuf[4 + 12:4 + 12 + 4])[0]
checksum = self.recvbuf[4 + 12 + 4:4 + 12 + 4 + 4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + payloadlen:
return None
msg = self.recvbuf[4 + 12 + 4 + 4:4 + 12 + 4 + 4 + payloadlen]
h = sha256(sha256(msg))
if checksum != h[:4]:
raise ValueError(
"got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4 + 12 + 4 + 4 + payloadlen:]
if command not in self.messagemap:
logger.warning("Received unknown command from %s:%d: '%s' %s" % (
self.dstaddr, self.dstport, command, repr(msg)))
raise ValueError("Unknown command: '%s'" % (command))
f = BytesIO(msg)
m = self.messagemap[command]()
m.deserialize(f)
return m
except Exception as e:
            logger.exception('got_data: %r', e)
raise
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self._log_message("receive", message)
self.cb.deliver(self, message)
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr,
self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
def disconnect_node(self):
self.disconnect = True
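# Typical wiring, shown only as a sketch (the address, port and rpc handle are
# placeholders supplied by the caller): create a callback object, open a NodeConn
# bound to it, then start a NetworkThread so asyncore can pump messages.
def _example_open_connection(rpc, dstaddr="127.0.0.1", dstport=18444):
    callback = NodeConnCB()
    conn = NodeConn(dstaddr, dstport, rpc, callback)
    callback.add_connection(conn)
    thread = NetworkThread()
    thread.start()
    callback.wait_for_verack()
    return conn, thread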
NetworkThread_should_stop = False
def StopNetworkThread():
global NetworkThread_should_stop
NetworkThread_should_stop = True
class NetworkThread(Thread):
poll_timeout = 0.1
def run(self):
while mininode_socket_map and not NetworkThread_should_stop:
with network_thread_loop_intent_lock:
# Acquire and immediately release lock.
# This allows other threads to more easily acquire network_thread_loop_lock by
# acquiring (and holding) network_thread_loop_intent_lock first since NetworkThread
# will block on trying to acquire network_thread_loop_intent_lock in the line above.
# If this was not done, other threads would need to wait for a long time (>10s) for
# network_thread_loop_lock since it is released only briefly between two loop iterations.
pass
with network_thread_loop_lock:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[obj.handle_close() for obj in disconnected]
try:
asyncore.loop(NetworkThread.poll_timeout, use_poll=True, map=mininode_socket_map, count=1)
except Exception as e:
# All exceptions are caught to prevent them from taking down the network thread.
# Since the error cannot be easily reported, it is just logged assuming that if
# the error is relevant, the test will detect it in some other way.
logger.warning("mininode NetworkThread: asyncore.loop() failed! " + str(e))
logger.debug("Network thread closing")
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
the-stack_0_10320 | import os
import shutil
import click
import jinja2
import pdfkit
import yaml
__author__ = "Kevin Ciarniello"
__copyright__ = "Copyright 2017, Kevin Ciarniello"
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Kevin Ciarniello"
__email__ = "[email protected]"
# Template defaults
defaults = {
'labels': None,
}
def get_theme_directory():
"""
Gets the theme directory
:return: a string of the themes directory
"""
return os.path.abspath('theme')
def read_yaml(filename):
"""
Reads the yaml file in and converts it to a yaml dict
:param filename: the file to convert
:return: a dictionary from the yaml
"""
with open(filename, 'rt') as f:
        return yaml.safe_load(f)
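# Illustrative sketch of a resume YAML this script can consume. Only the keys that
# main()/generate_pdf() read directly (config.name, config.year, config.output_dir)
# are shown; everything else is passed straight to the jinja2 templates, so its
# exact shape depends on the theme in use. Values below are placeholders.
_EXAMPLE_RESUME_YAML = """\
config:
  name: Jane Doe
  year: 2017
  output_dir: build
labels:
  email: [email protected]
  location: Example City
"""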
def render(filename, variables):
"""
Grabs the jinja2 file and renders it
:param filename: the jinja2 file to render
:param variables:
:return:
"""
with open(filename, 'rt') as f:
filename = jinja2.Template(f.read())
return filename.render(**variables)
def jinja2_files(source, files):
"""
Setup an ignore method for the copy, we want to ignore the .jinja2 files
:param source: the source directory
:param files: all the files from the source directory
:return: a list of files that don't include .jinja2
"""
return [filename for filename in files if filename.endswith('.jinja2')]
def build(data, config, output_dir):
"""
Build the HTML or the PDF to the output_dir
:param data:
:param config:
:param output_dir:
:return:
"""
variables = defaults.copy()
variables.update(data)
variables['config'] = config
# Clean the output directory
shutil.rmtree(output_dir, ignore_errors=True)
# Copy
shutil.copytree(get_theme_directory(), output_dir, ignore=jinja2_files)
# Get all the .jinja2 files
files = jinja2_files(None, os.listdir(get_theme_directory()))
for filename in files:
output_file = os.path.join(get_theme_directory(), filename)
html = render(output_file, variables)
# Create HTML type names
rendered_file = filename.replace('.jinja2', '.html')
# Remove any unusual characters
output_html = html.encode('ascii', 'ignore').decode('ascii')
# Write to the file
with open(os.path.join(output_dir, rendered_file), 'w+') as f:
f.write(output_html)
def generate_html(config, data):
"""
Generate the HTML
:param config:
:param data:
:return:
"""
output_dir = config.get('output_dir', 'build')
build(data, config, output_dir)
def generate_pdf(config, data):
"""
Generate a PDF from the HTML file
:param config:
:param data:
:return:
"""
output_dir = config.get('output_dir', 'build')
filename = config.get('name') + " " + str(config.get('year'))
output_file = os.path.join(output_dir, filename.strip().replace(" ", "-") + '-resume.pdf')
input_file = os.path.join(output_dir, 'index.html')
if not os.path.exists(input_file):
generate_html(config, data)
print(input_file)
if os.path.exists(input_file):
convert_html_to_pdf(input_file, output_file)
def convert_html_to_pdf(source_html, output_filename):
"""
Write the html to a PDF file
:param source_html: the source HTML file
:param output_filename: the output PDF file
:return: the error status
"""
# Generate PDF from a html file.
pdfkit.from_file(source_html, output_filename)
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('resume_file', nargs=1, required=1, type=click.Path())
@click.option('--generate', '-g', default='html',
help="Generate a type [default: html], html or pdf")
@click.option('--directory', '-d', default='build',
help="Output directory for the build files. [default: build]")
def main(resume_file, generate, directory):
"""
Entry function for the script to handle command arguments
and run appropriate build like 'html' and 'pdf'.
"""
# read resume data and config with some defaults
resume_data = read_yaml(resume_file)
config = resume_data.get('config', {})
if directory:
config['output_dir'] = directory
else:
config.setdefault('output_dir', directory)
# build based on the given format
commands = {'html': generate_html, 'pdf': generate_pdf}
return commands[generate](config, resume_data)
if __name__ == '__main__':
main()
|
the-stack_0_10322 | #
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
from decimal import Decimal
from typing import Union
import pytest
from enforce_typing import enforce_types
from ocean_lib.config import Config
from ocean_lib.models.bfactory import BFactory
from ocean_lib.models.bpool import BPool
from ocean_lib.models.btoken import BToken
from ocean_lib.models.test.conftest import alice_info
from ocean_lib.ocean.util import get_bfactory_address
from ocean_lib.web3_internal.currency import to_wei
from ocean_lib.web3_internal.wallet import Wallet
from web3.main import Web3
HUGEINT = 2 ** 255
def test_notokens_basic(
OCEAN_address, network, web3, config, alice_wallet, alice_address
):
"""Tests deployment of a pool without tokens."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
assert not pool.isPublicSwap()
assert not pool.isFinalized()
assert not pool.isBound(OCEAN_address)
assert pool.getNumTokens() == 0
assert pool.getCurrentTokens() == []
with pytest.raises(Exception):
pool.getFinalTokens() # pool's not finalized
assert pool.getSwapFee() == to_wei("1e-6")
assert pool.getController() == alice_address
assert str(pool)
with pytest.raises(Exception):
pool.finalize(from_wallet=alice_wallet) # can't finalize if no tokens
def test_setSwapFee_works(network, config, web3, alice_wallet):
"""Tests that a swap fee can be set on the pool by the controller of that pool."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
pool.setSwapFee(to_wei("0.011"), from_wallet=alice_wallet)
assert pool.getSwapFee() == to_wei("0.011")
def test_setSwapFee_fails(
network, config, web3, alice_wallet, alice_address, bob_wallet, bob_address
):
"""Tests that someone who isn't a controller can not set the swap fee."""
factory = BFactory(web3, get_bfactory_address(config.address_file, network))
pool_address = factory.newBPool(alice_wallet)
pool = BPool(web3, pool_address)
with pytest.raises(Exception):
pool.setSwapFee(
to_wei("0.011"), from_wallet=bob_wallet
) # not ok, bob isn't controller
pool.setController(bob_address, from_wallet=alice_wallet)
pool.setSwapFee(to_wei("0.011"), from_wallet=bob_wallet) # ok now
def test_setController(
network, config, web3, alice_wallet, alice_address, bob_wallet, bob_address
):
"""Tests that the controller of a pool can be changed."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
pool.setController(bob_address, from_wallet=alice_wallet)
assert pool.getController() == bob_address
pool.setController(alice_address, from_wallet=bob_wallet)
assert pool.getController() == alice_address
def test_setPublicSwap(network, config, web3, alice_wallet):
"""Tests that a pool can be set as public."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
pool.setPublicSwap(True, from_wallet=alice_wallet)
assert pool.isPublicSwap()
pool.setPublicSwap(False, from_wallet=alice_wallet)
assert not pool.isPublicSwap()
def test_2tokens_basic(network, config, web3, T1, T2, alice_wallet, alice_address):
"""Tests the deployment of a pool containing 2 tokens (basic happy flow)."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
assert T1.address != T2.address
assert T1.address != pool.address
assert T1.balanceOf(alice_address) >= to_wei(90)
    assert T2.balanceOf(alice_address) >= to_wei(10)
with pytest.raises(Exception): # can't bind until we approve
pool.bind(T1.address, to_wei(90), to_wei(9), from_wallet=alice_wallet)
# Bind two tokens to the pool
T1.approve(pool.address, to_wei(90), from_wallet=alice_wallet)
T2.approve(pool.address, to_wei(10), from_wallet=alice_wallet)
assert T1.allowance(alice_address, pool.address) == to_wei(90)
assert T2.allowance(alice_address, pool.address) == to_wei(10)
    assert not pool.isBound(T1.address) and not pool.isBound(T2.address)
pool.bind(T1.address, to_wei(90), to_wei(9), from_wallet=alice_wallet)
pool.bind(T2.address, to_wei(10), to_wei(1), from_wallet=alice_wallet)
assert pool.isBound(T1.address) and pool.isBound(T2.address)
assert pool.getNumTokens() == 2
assert pool.getCurrentTokens() == [T1.address, T2.address]
assert pool.getDenormalizedWeight(T1.address) == to_wei(9)
assert pool.getDenormalizedWeight(T2.address) == to_wei(1)
assert pool.getTotalDenormalizedWeight() == to_wei(10)
assert pool.getNormalizedWeight(T1.address) == to_wei("0.9")
assert pool.getNormalizedWeight(T2.address) == to_wei("0.1")
assert pool.getBalance(T1.address) == to_wei(90)
assert pool.getBalance(T2.address) == to_wei(10)
assert str(pool)
def test_unbind(network, config, web3, T1, T2, alice_wallet):
"""Tests that a pool can be unbound."""
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 1, 1, 1, 1
)
pool.unbind(T1.address, from_wallet=alice_wallet)
assert pool.getNumTokens() == 1
assert pool.getCurrentTokens() == [T2.address]
assert pool.getBalance(T2.address) == to_wei(1)
def test_finalize(network, config, web3, T1, T2, alice_address, alice_wallet):
"""Tests that a pool containing tokens can be finalized."""
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
assert not pool.isPublicSwap()
assert not pool.isFinalized()
assert pool.totalSupply() == 0
assert pool.balanceOf(alice_address) == 0
assert pool.allowance(alice_address, pool.address) == 0
pool.finalize(from_wallet=alice_wallet)
assert str(pool) != ""
assert pool.isPublicSwap()
assert pool.isFinalized()
assert pool.totalSupply() == to_wei(100)
assert pool.balanceOf(alice_address) == to_wei(100)
assert pool.allowance(alice_address, pool.address) == 0
assert pool.getFinalTokens() == [T1.address, T2.address]
assert pool.getCurrentTokens() == [T1.address, T2.address]
def test_public_pool(network, config, bob_wallet, alice_ocean):
"""Tests successful transfers inside a public pool."""
alice = alice_info()
alice_address = alice.address
bob_address = bob_wallet.address
T1 = alice.T1
T2 = alice.T2
pool = _createPoolWith2Tokens(
network,
config,
alice_ocean.web3,
alice.T1,
alice.T2,
alice.wallet,
90,
10,
9,
1,
)
BPT = pool
# alice give Bob some tokens
alice.T1.transfer(bob_wallet.address, to_wei(100), from_wallet=alice.wallet)
alice.T2.transfer(bob_wallet.address, to_wei(100), from_wallet=alice.wallet)
# verify holdings
assert alice.T1.balanceOf(alice.address) == to_wei(1000 - 90 - 100) # 810
assert alice.T2.balanceOf(alice.address) == to_wei(1000 - 10 - 100) # 890
assert BPT.balanceOf(alice.address) == to_wei(0)
assert alice.T1.balanceOf(bob_address) == to_wei(100)
assert alice.T2.balanceOf(bob_address) == to_wei(100)
assert BPT.balanceOf(bob_address) == to_wei(0)
assert T1.balanceOf(pool.address) == to_wei(90)
assert T2.balanceOf(pool.address) == to_wei(10)
assert BPT.balanceOf(pool.address) == to_wei(0)
# finalize
pool = BPool(alice_ocean.web3, pool.address)
pool.finalize(from_wallet=alice.wallet)
# verify holdings
assert alice.T1.balanceOf(alice.address) == to_wei(1000 - 90 - 100)
assert alice.T2.balanceOf(alice.address) == to_wei(1000 - 10 - 100)
assert BPT.balanceOf(alice.address) == to_wei(100) # new!
assert T1.balanceOf(pool.address) == to_wei(90)
assert T2.balanceOf(pool.address) == to_wei(10)
assert BPT.balanceOf(pool.address) == to_wei(0)
# bob join pool. Wants 10 BPT
T1.approve(pool.address, to_wei(100), from_wallet=bob_wallet)
T2.approve(pool.address, to_wei(100), from_wallet=bob_wallet)
pool.joinPool(
poolAmountOut=to_wei(10), # 10 BPT
maxAmountsIn=[to_wei(100), to_wei(100)],
from_wallet=bob_wallet,
)
# verify holdings
assert T1.balanceOf(alice_address) == to_wei(1000 - 90 - 100) # 810
assert T2.balanceOf(alice_address) == to_wei(1000 - 10 - 100) # 890
assert BPT.balanceOf(alice_address) == to_wei(100)
assert T1.balanceOf(bob_address) == to_wei(100 - 9) # 91
assert T2.balanceOf(bob_address) == to_wei(100 - 1) # 99
assert BPT.balanceOf(bob_address) == to_wei(10)
assert T1.balanceOf(pool.address) == to_wei(90 + 9) # 99
assert T2.balanceOf(pool.address) == to_wei(10 + 1) # 11
assert BPT.balanceOf(pool.address) == to_wei(0)
# bob sells 2 BPT
# -this is where BLabs fee kicks in. But the fee is currently set to 0.
pool.exitPool(
poolAmountIn=to_wei(2),
minAmountsOut=[to_wei(0), to_wei(0)],
from_wallet=bob_wallet,
)
assert T1.balanceOf(bob_address) == 92800000000000000018 # 92.8
assert T2.balanceOf(bob_address) == 99200000000000000002 # 99.2
assert BPT.balanceOf(bob_address) == to_wei(8)
# bob buys 5 more BPT
pool.joinPool(
poolAmountOut=to_wei(5),
maxAmountsIn=[to_wei(90), to_wei(90)],
from_wallet=bob_wallet,
)
assert BPT.balanceOf(bob_address) == to_wei(13)
# bob fully exits
pool.exitPool(poolAmountIn=to_wei(13), minAmountsOut=[0, 0], from_wallet=bob_wallet)
assert BPT.balanceOf(bob_address) == to_wei(0)
block = alice_ocean.web3.eth.block_number
block_confirmations = alice_ocean.config.block_confirmations.value
join_log = pool.get_join_logs(block - (block_confirmations + 1), block)[0]
assert join_log["args"]["tokenIn"] == T1.address
def test_rebind_more_tokens(network, config, web3, T1, T2, alice_wallet):
"""Tests that we can rebind more tokens on a pool."""
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
# insufficient allowance
with pytest.raises(Exception):
pool.rebind(T1.address, to_wei(120), to_wei(9), from_wallet=alice_wallet)
# sufficient allowance
T1.approve(pool.address, to_wei(30), from_wallet=alice_wallet)
pool.rebind(T1.address, to_wei(120), to_wei(9), from_wallet=alice_wallet)
def test_gulp(network, config, web3, T1, alice_wallet):
"""Test pool gulp."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
# bind T1 to the pool, with a balance of 2.0
T1.approve(pool.address, to_wei(50), from_wallet=alice_wallet)
pool.bind(T1.address, to_wei(2), to_wei(50), from_wallet=alice_wallet)
# T1 is now pool's (a) ERC20 balance (b) _records[token].balance
assert T1.balanceOf(pool.address) == to_wei(2) # ERC20 balance
assert pool.getBalance(T1.address) == to_wei(2) # records[]
# but then some joker accidentally sends 5.0 tokens to the pool's address
# rather than binding / rebinding. So it's in ERC20 bal but not records[]
T1.transfer(pool.address, to_wei(5), from_wallet=alice_wallet)
assert T1.balanceOf(pool.address) == to_wei(2 + 5) # ERC20 bal
assert pool.getBalance(T1.address) == to_wei(2) # records[]
# so, 'gulp' gets the pool to absorb the tokens into its balances.
# i.e. to update _records[token].balance to be in sync with ERC20 balance
pool.gulp(T1.address, from_wallet=alice_wallet)
assert T1.balanceOf(pool.address) == to_wei(2 + 5) # ERC20
assert pool.getBalance(T1.address) == to_wei(2 + 5) # records[]
def test_spot_price(network, config, web3, T1, T2, alice_wallet):
"""Test calculation of prices on spot."""
(price, price_sans_fee) = _spotPrices(
network, config, web3, T1, T2, alice_wallet, 1, 1, 1, 1
)
assert price_sans_fee == to_wei(1)
assert price == to_wei("1.000001000001000001")
(price, price_sans_fee) = _spotPrices(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
assert price_sans_fee == to_wei(1)
assert price == to_wei("1.000001000001000001")
(price, price_sans_fee) = _spotPrices(
network, config, web3, T1, T2, alice_wallet, 1, 2, 1, 1
)
assert price_sans_fee == to_wei("0.5")
assert price == to_wei("0.500000500000500001")
(price, price_sans_fee) = _spotPrices(
network, config, web3, T1, T2, alice_wallet, 2, 1, 1, 1
)
assert price_sans_fee == to_wei(2)
assert price == to_wei("2.000002000002000002")
(price, price_sans_fee) = _spotPrices(
network, config, web3, T1, T2, alice_wallet, 9, 10, 9, 1
)
assert price_sans_fee == to_wei("0.1")
assert price == to_wei("0.100000100000100000")
def test_joinSwapExternAmountIn(
network, config, web3, T1, T2, alice_wallet, alice_address
):
"""Tests adding an external amount inside a pool.
When the pool is not public, assert that an Exception is thrown.
When the pool is public, assert that the swap is made and the correct balance remains.
"""
init_T1balance = T1.balanceOf(alice_address)
T2balance = T2.balanceOf(alice_address)
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
T1.approve(pool.address, to_wei(100), from_wallet=alice_wallet)
# pool's not public
with pytest.raises(Exception):
pool.swapExactAmountOut(
tokenIn_address=T1.address,
maxAmountIn=to_wei(100),
tokenOut_address=T2.address,
tokenAmountOut=to_wei(10),
maxPrice=HUGEINT,
from_wallet=alice_wallet,
)
# pool's public
pool.setPublicSwap(True, from_wallet=alice_wallet)
pool.swapExactAmountOut(
tokenIn_address=T1.address,
maxAmountIn=to_wei(100),
tokenOut_address=T2.address,
tokenAmountOut=to_wei(1),
maxPrice=HUGEINT,
from_wallet=alice_wallet,
)
new_balance = init_T1balance - to_wei("91.055")
assert (
new_balance - to_wei("0.005")
<= T1.balanceOf(alice_address)
<= new_balance + to_wei("0.005")
)
assert T2.balanceOf(alice_address) == T2balance - to_wei(9)
block = web3.eth.block_number
block_confirmations = config.block_confirmations.value
swap_log = pool.get_swap_logs(block - (block_confirmations + 1), block)[0]
assert swap_log["args"]["tokenIn"] == T1.address
def test_joinswapPoolAmountOut(
network, config, web3, T1, T2, alice_address, alice_wallet
):
"""Tests taking an amount out of the pool."""
T1balance = T1.balanceOf(alice_address)
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
BPT = pool
pool.finalize(from_wallet=alice_wallet)
pool_balance = BPT.balanceOf(alice_address)
T1.approve(pool.address, to_wei(90), from_wallet=alice_wallet)
assert T1.balanceOf(alice_address) == T1balance - to_wei(90)
T1balance = T1.balanceOf(alice_address)
pool.joinswapPoolAmountOut(
tokenIn_address=T1.address,
poolAmountOut=to_wei(10), # BPT wanted
maxAmountIn=to_wei(90), # max T1 to spend
from_wallet=alice_wallet,
)
assert T1.balanceOf(alice_address) >= T1balance - to_wei(90)
assert BPT.balanceOf(alice_address) == pool_balance + to_wei(10)
def test_exitswapPoolAmountIn(
network, config, web3, T1, T2, alice_address, alice_wallet
):
T1balance = T1.balanceOf(alice_address)
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
BPT = pool
pool.finalize(from_wallet=alice_wallet)
pool_balance = BPT.balanceOf(alice_address)
assert T1.balanceOf(alice_address) == T1balance - to_wei(90)
pool.exitswapPoolAmountIn(
tokenOut_address=T1.address,
poolAmountIn=to_wei(10), # BPT spent
minAmountOut=to_wei(1), # min T1 wanted
from_wallet=alice_wallet,
)
assert T1.balanceOf(alice_address) >= T1balance - to_wei(90) + to_wei(1)
assert BPT.balanceOf(alice_address) == pool_balance - to_wei(10)
def test_exitswapExternAmountOut(
network, config, web3, T1, T2, alice_address, alice_wallet, alice_ocean
):
T1balance = T1.balanceOf(alice_address)
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
BPT = pool
pool.finalize(from_wallet=alice_wallet)
pool_balance = BPT.balanceOf(alice_address)
assert T1.balanceOf(alice_address) == T1balance - to_wei(90)
pool.exitswapExternAmountOut(
tokenOut_address=T1.address,
tokenAmountOut=to_wei(2), # T1 wanted
maxPoolAmountIn=to_wei(10), # max BPT spent
from_wallet=alice_wallet,
)
assert T1.balanceOf(alice_address) == T1balance - to_wei(90) + to_wei(2)
assert BPT.balanceOf(alice_address) >= pool_balance - to_wei(10)
block = alice_ocean.web3.eth.block_number
block_confirmations = config.block_confirmations.value
exit_log = pool.get_exit_logs(block - (block_confirmations + 1), block)[0]
assert exit_log["args"]["tokenOut"] == T1.address
def test_calcSpotPrice(network, config, web3, T1, T2, alice_address, alice_wallet):
"""Tests pricing with calcSpotPrice."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcSpotPrice(
tokenBalanceIn=to_wei(10),
tokenWeightIn=to_wei(1),
tokenBalanceOut=to_wei(11),
tokenWeightOut=to_wei(1),
swapFee=0,
)
assert x == to_wei("0.909090909090909091")
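# Reference sketch (not used by the assertions above): the Balancer spot-price
# formula the contract is expected to implement,
#   spotPrice = (balanceIn / weightIn) / (balanceOut / weightOut) * 1 / (1 - swapFee).
# With the numbers in test_calcSpotPrice: (10 / 1) / (11 / 1) = 10 / 11 ~= 0.909090...
# Pass Decimal values to avoid float rounding; the on-chain fixed-point truncation
# is not reproduced here.
def _expected_spot_price(balance_in, weight_in, balance_out, weight_out, swap_fee=Decimal(0)):
    ratio = (balance_in / weight_in) / (balance_out / weight_out)
    return ratio / (Decimal(1) - swap_fee)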
def test_calcOutGivenIn(network, config, web3, alice_wallet):
"""Tests pricing with calcOutGivenIn."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcOutGivenIn(
tokenBalanceIn=to_wei(10),
tokenWeightIn=to_wei(1),
tokenBalanceOut=to_wei("10.1"),
tokenWeightOut=to_wei(1),
tokenAmountIn=to_wei(1),
swapFee=0,
)
assert x == to_wei("0.918181818181818181")
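# Reference sketch of the formula behind calcOutGivenIn (fee omitted):
#   amountOut = balanceOut * (1 - (balanceIn / (balanceIn + amountIn)) ** (weightIn / weightOut)).
# With the numbers above: 10.1 * (1 - 10 / 11) = 10.1 / 11 ~= 0.918181..., matching
# the asserted value up to fixed-point truncation.
def _expected_out_given_in(balance_in, weight_in, balance_out, weight_out, amount_in):
    weight_ratio = weight_in / weight_out
    return balance_out * (1 - (balance_in / (balance_in + amount_in)) ** weight_ratio)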
def test_calcInGivenOut(network, config, web3, alice_wallet):
"""Tests pricing with calcInGivenOut."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcInGivenOut(
tokenBalanceIn=to_wei(10),
tokenWeightIn=to_wei(1),
tokenBalanceOut=to_wei("10.1"),
tokenWeightOut=to_wei(1),
tokenAmountOut=to_wei(1),
swapFee=0,
)
assert x == to_wei("1.098901098901098900")
def test_calcPoolOutGivenSingleIn(network, config, web3, alice_wallet):
"""Tests calculations with calcPoolOutGivenSingleIn."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcPoolOutGivenSingleIn(
tokenBalanceIn=to_wei(10),
tokenWeightIn=to_wei(1),
poolSupply=to_wei(120),
totalWeight=to_wei(2),
tokenAmountIn=to_wei("0.1"),
swapFee=0,
)
assert x == to_wei("0.598507453453125000")
def test_calcSingleInGivenPoolOut(network, config, web3, alice_wallet):
"""Tests pricing with calcSingleInGivenPoolOut."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcSingleInGivenPoolOut(
tokenBalanceIn=to_wei(10),
tokenWeightIn=to_wei(1),
poolSupply=to_wei(120),
totalWeight=to_wei(2),
poolAmountOut=to_wei(10),
swapFee=0,
)
assert x == to_wei("1.736111111111111100")
def test_calcSingleOutGivenPoolIn(network, config, web3, alice_wallet):
"""Tests pricing with calcSingleOutGivenPoolIn."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcSingleOutGivenPoolIn(
tokenBalanceOut=to_wei(10),
tokenWeightOut=to_wei(1),
poolSupply=to_wei(120),
totalWeight=to_wei(2),
poolAmountIn=to_wei(10),
swapFee=0,
)
assert x == to_wei("1.597222222222222220")
def test_calcPoolInGivenSingleOut(network, config, web3, alice_wallet):
"""Tests calculations with calcPoolInGivenSingleOut."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcPoolInGivenSingleOut(
tokenBalanceOut=to_wei(1000),
tokenWeightOut=to_wei(5),
poolSupply=to_wei(100),
totalWeight=to_wei(10),
tokenAmountOut=to_wei("0.1"),
swapFee=0,
)
assert x == to_wei("0.005000125006250000")
@enforce_types
def _createPoolWith2Tokens(
network: str,
config: Config,
web3: Web3,
T1: BToken,
T2: BToken,
wallet: Wallet,
bal1: Union[Decimal, str, int],
bal2: Union[Decimal, str, int],
w1: Union[Decimal, str, int],
w2: Union[Decimal, str, int],
):
"""Helper function to create a basic pool containing 2 tokens."""
pool = _deployBPool(web3, config.address_file, network, wallet)
T1.get_tx_receipt(web3, T1.approve(pool.address, to_wei(bal1), from_wallet=wallet))
T2.get_tx_receipt(web3, T2.approve(pool.address, to_wei(bal2), from_wallet=wallet))
if pool.isBound(T1.address):
pool.unbind(T1.address, wallet)
if pool.isBound(T2.address):
pool.unbind(T2.address, wallet)
pool.bind(T1.address, to_wei(bal1), to_wei(w1), from_wallet=wallet)
pool.bind(T2.address, to_wei(bal2), to_wei(w2), from_wallet=wallet)
return pool
@enforce_types
def _deployBPool(
web3: Web3, address_file: str, network: str, from_wallet: Wallet
) -> BPool:
"""Helper function to deploy a pool."""
factory_address = get_bfactory_address(address_file, network)
factory = BFactory(web3, factory_address)
pool_address = factory.newBPool(from_wallet=from_wallet)
pool = BPool(web3, pool_address)
return pool
@enforce_types
def _spotPrices(
network: str,
config: Config,
web3: Web3,
T1: BToken,
T2: BToken,
wallet: Wallet,
bal1: Union[Decimal, str, int],
bal2: Union[Decimal, str, int],
w1: Union[Decimal, str, int],
w2: Union[Decimal, str, int],
):
"""Helper function to allow for spot price calculations."""
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, wallet, bal1, bal2, w1, w2
)
a1, a2 = T1.address, T2.address
return (pool.getSpotPrice(a1, a2), pool.getSpotPriceSansFee(a1, a2))
|
the-stack_0_10323 | #!/usr/bin/env python3
import subprocess
import argparse
from pathlib import Path
import re
from statistics import stdev, mean, median_high
from math import floor, ceil
time_parser = re.compile(r'Solution found in (\d+\.\d+) ms')
num_runs = 10
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--binary', type=str, help='Path to binary to benchmark', required=True)
parser.add_argument('-i', '--input', type=str, help='Path to inputs for the benchmark. Should contain one sudoku per line.', required=True)
args = parser.parse_args()
if not Path(args.binary).is_file():
print('Argument {} does not specify a valid path to a binary'.format(args.binary))
exit(1)
if not Path(args.input).is_file():
print('Argument {} does not specify a valid path to an input file'.format(args.binary))
exit(2)
def unfurl_line(line):
assert len(line) == 9 * 9
return '\n'.join(line[i:i+9] for i in range(0, 81, 9))
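# Sanity sketch of the reshaping above, using an all-zero placeholder puzzle:
def _example_unfurl():
    grid = unfurl_line("0" * 81)
    assert grid == "\n".join(["0" * 9] * 9)
    return grid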
def run_with_input(line):
results = []
for i in range(num_runs):
foo = subprocess.run([args.binary], check=True, input=unfurl_line(line),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
matched = time_parser.match(foo.stderr)
results.append(float(matched.groups()[0]))
return mean(results), stdev(results)
with open(args.input, 'r') as input_file, open('table.md', 'w') as table_file, open('results.csv', 'w') as csv_file:
table_file.write('| Problem | Time taken mean (ms) | Time taken stdev (ms) |\n')
table_file.write('|---------|----------------------|-----------------------|\n')
for idx, line in enumerate(input_file):
line = line.rstrip()
result = run_with_input(line)
table_file.write('| {} | {} | {} |\n'.format(idx, *result))
csv_file.write('{}, {}, {}\n'.format(idx, *result))
print('Problem: {}, mean: {}, stdev: {}'.format(idx, *result))
|
the-stack_0_10324 | """
.. _ex-report:
================================
Make an MNE-Report with a Slider
================================
In this example, MEG evoked data are plotted in an html slider.
"""
# Authors: Teon Brooks <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
from mne.report import Report
from mne.datasets import sample
from mne import read_evokeds
from matplotlib import pyplot as plt
data_path = sample.data_path()
meg_path = data_path + '/MEG/sample'
subjects_dir = data_path + '/subjects'
evoked_fname = meg_path + '/sample_audvis-ave.fif'
###############################################################################
# Do standard folder parsing (this can take a couple of minutes):
report = Report(image_format='png', subjects_dir=subjects_dir,
info_fname=evoked_fname, subject='sample',
raw_psd=False) # use False for speed here
report.parse_folder(meg_path, on_error='ignore', mri_decim=10)
###############################################################################
# Add a custom section with an evoked slider:
# Load the evoked data
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(None, 0), verbose=False)
evoked.crop(0, .2)
times = evoked.times[::4]
# Create a list of figs for the slider
figs = list()
for t in times:
figs.append(evoked.plot_topomap(t, vmin=-300, vmax=300, res=100,
show=False))
plt.close(figs[-1])
report.add_slider_to_section(figs, times, 'Evoked Response',
image_format='png') # can also use 'svg'
# to save report
report.save('my_report.html', overwrite=True)
|
the-stack_0_10326 | # -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <[email protected]>
"""
Defines functions for plotting the results of the identify step.
"""
from functools import singledispatch
import numpy as np
import scipy.linalg as la
from fsc.export import export
from .result import NodalPoint, NodalLine
from .._common_plot import _setup_plot
@export
def result(res, *, axis=None):
"""Plot the result of the identify step.
Arguments
---------
res : IdentificationResultContainer
Result of the identify step.
axis : matplotlib.axes.Axes, optional
Axes on which the result is plotted.
"""
fig, axis, _ = _setup_plot(res.coordinate_system.limits, axis=axis)
feature_size = res.feature_size
for identification_result in res:
shape = identification_result.shape
color = axis._get_lines.get_next_color() # pylint: disable=protected-access
if shape is None:
_plot_positions(
identification_result.positions, axis=axis, color=color
)
else:
_plot_result(
shape, axis=axis, color=color, feature_size=feature_size
)
return fig, axis
def _plot_positions(positions, *, axis, color):
coordinates = list(np.array(list(positions)).T)
axis.scatter(*coordinates, color=color)
@singledispatch
def _plot_result(shape, axis, color, feature_size):
raise NotImplementedError
@export
@_plot_result.register(NodalPoint)
def nodal_point(shape, *, axis, color, feature_size=None):
"""
Plot a nodal point.
Arguments
---------
shape : NodalPoint
Nodal point to be plotted.
axis : matplotlib.axes.Axes
Axes on which to plot.
color : str
Color of the point.
feature_size : float
Distance between two nodal points at which they are considered distinct.
This argument is not used in this function.
"""
coordinates = [[val] for val in shape.position]
axis.scatter(*coordinates, color=color)
@export
@_plot_result.register(NodalLine)
def nodal_line(shape, *, axis, color, feature_size=None):
"""
Plot a nodal line.
Arguments
---------
shape : NodalLine
Nodal line to be plotted.
axis : matplotlib.axes.Axes
Axes on which to plot.
color : str
Color of the nodal line.
feature_size : float
Distance between two nodal points at which they are considered distinct.
Used for cutting the line when it goes across periodic boundaries.
"""
if feature_size is None:
feature_size = np.inf
graph = shape.graph
paths = _get_graph_paths(graph, feature_size=feature_size)
if paths:
for path in paths:
axis.plot(*np.array(path).T, color=color) # pylint: disable=not-an-iterable
else:
axis.scatter(*np.array(list(graph.nodes)).T, color=color) # pylint: disable=not-an-iterable
def _get_graph_paths(graph, feature_size):
"""
Separate a graph into paths, breaking when there is no neighbor or when
passing across the periodic boundary.
"""
working_graph = graph.copy()
paths = []
while working_graph.edges:
curr_node = _get_next_starting_point(working_graph)
curr_path = [curr_node]
while True:
try:
next_node = next(working_graph.neighbors(curr_node))
except StopIteration:
paths.append(curr_path)
break
if la.norm(
np.array(next_node) - np.array(curr_node)
) > 2 * feature_size:
paths.append(curr_path)
curr_path = [next_node]
else:
curr_path.append(next_node)
working_graph.remove_edge(curr_node, next_node)
curr_node = next_node
return paths
def _get_next_starting_point(graph):
nonzero_degree = [(node, degree) for node, degree in graph.degree
if degree > 0]
return min(
nonzero_degree,
key=lambda val: val[1] if val[1] != 2 else float('inf')
)[0]
|
the-stack_0_10328 | import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import Index, Series
from pandas.core.indexes.datetimes import Timestamp
import pandas.util.testing as tm
class TestSeriesQuantile:
def test_quantile(self, datetime_series):
q = datetime_series.quantile(0.1)
assert q == np.percentile(datetime_series.dropna(), 10)
q = datetime_series.quantile(0.9)
assert q == np.percentile(datetime_series.dropna(), 90)
# object dtype
q = Series(datetime_series, dtype=object).quantile(0.9)
assert q == np.percentile(datetime_series.dropna(), 90)
# datetime64[ns] dtype
dts = datetime_series.index.to_series()
q = dts.quantile(0.2)
assert q == Timestamp("2000-01-10 19:12:00")
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(0.25)
assert q == pd.to_timedelta("24:00:00")
# GH7661
result = Series([np.timedelta64("NaT")]).sum()
assert result == pd.Timedelta(0)
msg = "percentiles should all be in the interval \\[0, 1\\]"
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with pytest.raises(ValueError, match=msg):
datetime_series.quantile(invalid)
def test_quantile_multi(self, datetime_series):
qs = [0.1, 0.9]
result = datetime_series.quantile(qs)
expected = pd.Series(
[
np.percentile(datetime_series.dropna(), 10),
np.percentile(datetime_series.dropna(), 90),
],
index=qs,
name=datetime_series.name,
)
tm.assert_series_equal(result, expected)
dts = datetime_series.index.to_series()
dts.name = "xxx"
result = dts.quantile((0.2, 0.2))
expected = Series(
[Timestamp("2000-01-10 19:12:00"), Timestamp("2000-01-10 19:12:00")],
index=[0.2, 0.2],
name="xxx",
)
tm.assert_series_equal(result, expected)
result = datetime_series.quantile([])
expected = pd.Series(
[], name=datetime_series.name, index=Index([], dtype=float)
)
tm.assert_series_equal(result, expected)
def test_quantile_interpolation(self, datetime_series):
# see gh-10174
# interpolation = linear (default case)
q = datetime_series.quantile(0.1, interpolation="linear")
assert q == np.percentile(datetime_series.dropna(), 10)
q1 = datetime_series.quantile(0.1)
assert q1 == np.percentile(datetime_series.dropna(), 10)
# test with and without interpolation keyword
assert q == q1
def test_quantile_interpolation_dtype(self):
# GH #10174
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation="lower")
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation="higher")
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
def test_quantile_nan(self):
# GH 13098
s = pd.Series([1, 2, 3, 4, np.nan])
result = s.quantile(0.5)
expected = 2.5
assert result == expected
# all nan/empty
cases = [Series([]), Series([np.nan, np.nan])]
for s in cases:
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
res = s.quantile([0.2, 0.3])
tm.assert_series_equal(res, pd.Series([np.nan, np.nan], index=[0.2, 0.3]))
@pytest.mark.parametrize(
"case",
[
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
],
[
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
],
[pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")],
# NaT
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.NaT,
],
[
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
pd.NaT,
],
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.NaT,
],
],
)
def test_quantile_box(self, case):
s = pd.Series(case, name="XXX")
res = s.quantile(0.5)
assert res == case[1]
res = s.quantile([0.5])
exp = pd.Series([case[1]], index=[0.5], name="XXX")
tm.assert_series_equal(res, exp)
def test_datetime_timedelta_quantiles(self):
# covers #9694
assert pd.isna(Series([], dtype="M8[ns]").quantile(0.5))
assert pd.isna(Series([], dtype="m8[ns]").quantile(0.5))
def test_quantile_nat(self):
res = Series([pd.NaT, pd.NaT]).quantile(0.5)
assert res is pd.NaT
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
@pytest.mark.parametrize(
"values, dtype",
[([0, 0, 0, 1, 2, 3], "Sparse[int]"), ([0.0, None, 1.0, 2.0], "Sparse[float]")],
)
def test_quantile_sparse(self, values, dtype):
ser = pd.Series(values, dtype=dtype)
result = ser.quantile([0.5])
expected = pd.Series(np.asarray(ser)).quantile([0.5])
tm.assert_series_equal(result, expected)
def test_quantile_empty(self):
# floats
s = Series([], dtype="float64")
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# int
s = Series([], dtype="int64")
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# datetime
s = Series([], dtype="datetime64[ns]")
res = s.quantile(0.5)
assert res is pd.NaT
res = s.quantile([0.5])
exp = Series([pd.NaT], index=[0.5])
tm.assert_series_equal(res, exp)
|
the-stack_0_10333 | ##
# See the file COPYRIGHT for copyright information.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Street tests for :mod:`ranger-ims-server.store`
"""
from ims.ext.trial import asyncAsDeferred
from ims.model import Event
from .base import DataStoreTests
__all__ = ()
class DataStoreConcentricStreetTests(DataStoreTests):
"""
Tests for :class:`IMSDataStore` concentric street access.
"""
@asyncAsDeferred
async def test_concentricStreets(self) -> None:
"""
        :meth:`IMSDataStore.concentricStreets` returns the concentric
        streets for the given event.
"""
for event, streetID, streetName in (
(Event(id="Foo"), "A", "Alpha"),
(Event(id="Foo Bar"), "B", "Bravo"),
(Event(id="XYZZY"), "C", "Charlie"),
):
store = await self.store()
await store.createEvent(event)
await store.storeConcentricStreet(event, streetID, streetName)
concentricStreets = await store.concentricStreets(event)
self.assertEqual(len(concentricStreets), 1)
self.assertEqual(concentricStreets.get(streetID), streetName)
@asyncAsDeferred
async def test_createConcentricStreet(self) -> None:
"""
:meth:`IMSDataStore.createConcentricStreet` creates a concentric
streets for the given event.
"""
for event, streetID, streetName in (
(Event(id="Foo"), "A", "Alpha"),
(Event(id="Foo Bar"), "B", "Bravo"),
(Event(id="XYZZY"), "C", "Charlie"),
):
store = await self.store()
await store.createEvent(event)
await store.createConcentricStreet(
event=event, id=streetID, name=streetName
)
stored = await store.concentricStreets(event=event)
self.assertEqual(len(stored), 1)
self.assertEqual(stored.get(streetID), streetName)
|
the-stack_0_10334 | # -*- coding: utf-8 -*-
import os
import sys
import threading
from copy import deepcopy
from tempfile import mkstemp
import six
from six import BytesIO
from .import_bind import PostImportHookPatching
from ..config import running_remotely
from ..debugging.log import LoggerRoot
from ..utilities.resource_monitor import ResourceMonitor
class PatchedMatplotlib:
_patched_original_plot = None
_patched_original_figure = None
_patched_original_savefig = None
__patched_original_imshow = None
__patched_original_draw_all = None
__patched_draw_all_recursion_guard = False
_global_plot_counter = -1
_global_image_counter = -1
_global_image_counter_limit = None
_last_iteration_plot_titles = {}
_current_task = None
_support_image_plot = False
_matplotlylib = None
_plotly_renderer = None
_lock_renderer = threading.RLock()
_recursion_guard = {}
_matplot_major_version = 2
_logger_started_reporting = False
_matplotlib_reported_titles = set()
class _PatchWarnings(object):
def __init__(self):
pass
def warn(self, text, *args, **kwargs):
raise ValueError(text)
def __getattr__(self, item):
def bypass(*args, **kwargs):
pass
return bypass
@staticmethod
def patch_matplotlib():
# only once
if PatchedMatplotlib._patched_original_plot is not None:
return True
# make sure we only patch once
PatchedMatplotlib._patched_original_plot = False
# noinspection PyBroadException
try:
# we support matplotlib version 2.0.0 and above
import matplotlib
PatchedMatplotlib._matplot_major_version = int(matplotlib.__version__.split('.')[0])
if PatchedMatplotlib._matplot_major_version < 2:
LoggerRoot.get_base_logger().warning(
'matplotlib binding supports version 2.0 and above, found version {}'.format(
matplotlib.__version__))
PatchedMatplotlib._patched_original_plot = False
return False
if running_remotely():
# disable GUI backend - make headless
matplotlib.rcParams['backend'] = 'agg'
import matplotlib.pyplot
matplotlib.pyplot.switch_backend('agg')
import matplotlib.pyplot as plt
import matplotlib.figure as figure
if six.PY2:
PatchedMatplotlib._patched_original_plot = staticmethod(plt.show)
PatchedMatplotlib._patched_original_imshow = staticmethod(plt.imshow)
PatchedMatplotlib._patched_original_figure = staticmethod(figure.Figure.show)
PatchedMatplotlib._patched_original_savefig = staticmethod(figure.Figure.savefig)
else:
PatchedMatplotlib._patched_original_plot = plt.show
PatchedMatplotlib._patched_original_imshow = plt.imshow
PatchedMatplotlib._patched_original_figure = figure.Figure.show
PatchedMatplotlib._patched_original_savefig = figure.Figure.savefig
# noinspection PyBroadException
try:
import matplotlib.pylab as pltlab
if plt.show == pltlab.show:
pltlab.show = PatchedMatplotlib.patched_show
if plt.imshow == pltlab.imshow:
pltlab.imshow = PatchedMatplotlib.patched_imshow
except Exception:
pass
plt.show = PatchedMatplotlib.patched_show
figure.Figure.show = PatchedMatplotlib.patched_figure_show
sys.modules['matplotlib'].pyplot.imshow = PatchedMatplotlib.patched_imshow
sys.modules['matplotlib'].figure.Figure.savefig = PatchedMatplotlib.patched_savefig
# patch plotly so we know it failed us.
from plotly.matplotlylib import renderer
renderer.warnings = PatchedMatplotlib._PatchWarnings()
# ignore deprecation warnings from plotly to matplotlib
try:
import warnings
warnings.filterwarnings(action='ignore', category=matplotlib.MatplotlibDeprecationWarning,
module='plotly')
warnings.filterwarnings(action='ignore', category=UserWarning, module='plotly')
except Exception:
pass
except Exception:
return False
# patch IPython matplotlib inline mode
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
from IPython import get_ipython
ip = get_ipython()
if ip and matplotlib.is_interactive():
# instead of hooking ipython, we should hook the matplotlib
import matplotlib.pyplot as plt
PatchedMatplotlib.__patched_original_draw_all = plt.draw_all
plt.draw_all = PatchedMatplotlib.__patched_draw_all
# ip.events.register('post_execute', PatchedMatplotlib.ipython_post_execute_hook)
except Exception:
pass
# update api version
from ..backend_api import Session
PatchedMatplotlib._support_image_plot = Session.check_min_api_version('2.2')
# create plotly renderer
try:
from plotly import optional_imports
PatchedMatplotlib._matplotlylib = optional_imports.get_module('plotly.matplotlylib')
PatchedMatplotlib._plotly_renderer = PatchedMatplotlib._matplotlylib.PlotlyRenderer()
except Exception:
pass
return True
@staticmethod
def update_current_task(task):
        # make sure we have a default value
if PatchedMatplotlib._global_image_counter_limit is None:
from ..config import config
PatchedMatplotlib._global_image_counter_limit = config.get('metric.matplotlib_untitled_history_size', 100)
# if we already patched it, just update the current task
if PatchedMatplotlib._patched_original_plot is not None:
PatchedMatplotlib._current_task = task
# if matplotlib is not loaded yet, get a callback hook
elif not running_remotely() and \
('matplotlib.pyplot' not in sys.modules and 'matplotlib.pylab' not in sys.modules):
PatchedMatplotlib._current_task = task
PostImportHookPatching.add_on_import('matplotlib.pyplot', PatchedMatplotlib.patch_matplotlib)
PostImportHookPatching.add_on_import('matplotlib.pylab', PatchedMatplotlib.patch_matplotlib)
elif PatchedMatplotlib.patch_matplotlib():
PatchedMatplotlib._current_task = task
@staticmethod
def patched_imshow(*args, **kw):
ret = PatchedMatplotlib._patched_original_imshow(*args, **kw)
try:
from matplotlib import _pylab_helpers
# store on the plot that this is an imshow plot
stored_figure = _pylab_helpers.Gcf.get_active()
if stored_figure:
stored_figure._trains_is_imshow = 1 if not hasattr(stored_figure, '_trains_is_imshow') \
else stored_figure._trains_is_imshow + 1
except Exception:
pass
return ret
@staticmethod
def patched_savefig(self, *args, **kw):
ret = PatchedMatplotlib._patched_original_savefig(self, *args, **kw)
# noinspection PyBroadException
try:
fname = kw.get('fname') or args[0]
from pathlib2 import Path
if six.PY3:
from pathlib import Path as Path3
else:
Path3 = Path
# if we are not storing into a file (str/Path) do not log the matplotlib
if not isinstance(fname, (str, Path, Path3)):
return ret
except Exception:
pass
tid = threading._get_ident() if six.PY2 else threading.get_ident()
if not PatchedMatplotlib._recursion_guard.get(tid):
PatchedMatplotlib._recursion_guard[tid] = True
# noinspection PyBroadException
try:
PatchedMatplotlib._report_figure(specific_fig=self, set_active=False)
except Exception:
pass
PatchedMatplotlib._recursion_guard[tid] = False
return ret
@staticmethod
def patched_figure_show(self, *args, **kw):
tid = threading._get_ident() if six.PY2 else threading.get_ident()
if PatchedMatplotlib._recursion_guard.get(tid):
            # we are inside a guard, do nothing
return PatchedMatplotlib._patched_original_figure(self, *args, **kw)
PatchedMatplotlib._recursion_guard[tid] = True
PatchedMatplotlib._report_figure(set_active=False, specific_fig=self)
ret = PatchedMatplotlib._patched_original_figure(self, *args, **kw)
PatchedMatplotlib._recursion_guard[tid] = False
return ret
@staticmethod
def patched_show(*args, **kw):
tid = threading._get_ident() if six.PY2 else threading.get_ident()
PatchedMatplotlib._recursion_guard[tid] = True
# noinspection PyBroadException
try:
figures = PatchedMatplotlib._get_output_figures(None, all_figures=True)
for figure in figures:
# if this is a stale figure (just updated) we should send it, the rest will not be stale
if figure.canvas.figure.stale or (hasattr(figure, '_trains_is_imshow') and figure._trains_is_imshow):
PatchedMatplotlib._report_figure(stored_figure=figure)
except Exception:
pass
ret = PatchedMatplotlib._patched_original_plot(*args, **kw)
if PatchedMatplotlib._current_task and sys.modules['matplotlib'].rcParams['backend'] == 'agg':
# clear the current plot, because no one else will
# noinspection PyBroadException
try:
if sys.modules['matplotlib'].rcParams['backend'] == 'agg':
import matplotlib.pyplot as plt
plt.clf()
except Exception:
pass
PatchedMatplotlib._recursion_guard[tid] = False
return ret
@staticmethod
def _report_figure(force_save_as_image=False, stored_figure=None, set_active=True, specific_fig=None):
if not PatchedMatplotlib._current_task:
return
# noinspection PyBroadException
try:
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
from plotly.io import templates
if specific_fig is None:
# store the figure object we just created (if it is not already there)
stored_figure = stored_figure or _pylab_helpers.Gcf.get_active()
if not stored_figure:
# nothing for us to do
return
# check if this is an imshow
if hasattr(stored_figure, '_trains_is_imshow'):
# flag will be cleared when calling clf() (object will be replaced)
stored_figure._trains_is_imshow = max(0, stored_figure._trains_is_imshow - 1)
force_save_as_image = True
# get current figure
mpl_fig = stored_figure.canvas.figure # plt.gcf()
else:
mpl_fig = specific_fig
# convert to plotly
image = None
plotly_fig = None
image_format = 'jpeg'
fig_dpi = 300
if force_save_as_image:
# if this is an image, store as is.
fig_dpi = None
else:
image_format = 'svg'
# protect with lock, so we support multiple threads using the same renderer
PatchedMatplotlib._lock_renderer.acquire()
# noinspection PyBroadException
try:
def our_mpl_to_plotly(fig):
if not PatchedMatplotlib._matplotlylib or not PatchedMatplotlib._plotly_renderer:
return None
plotly_renderer = PatchedMatplotlib._matplotlylib.PlotlyRenderer()
PatchedMatplotlib._matplotlylib.Exporter(plotly_renderer, close_mpl=False).run(fig)
x_ticks = list(plotly_renderer.current_mpl_ax.get_xticklabels())
if x_ticks:
# noinspection PyBroadException
try:
# check if all values can be cast to float
[float(t.get_text().replace('−', '-')) for t in x_ticks]
except Exception:
# noinspection PyBroadException
try:
plotly_renderer.plotly_fig['layout']['xaxis1'].update({
'ticktext': [t.get_text() for t in x_ticks],
'tickvals': [t.get_position()[0] for t in x_ticks],
})
except Exception:
pass
y_ticks = list(plotly_renderer.current_mpl_ax.get_yticklabels())
if y_ticks:
# noinspection PyBroadException
try:
# check if all values can be cast to float
_ = [float(t.get_text().replace('−', '-')) for t in y_ticks]
except Exception:
# noinspection PyBroadException
try:
plotly_renderer.plotly_fig['layout']['yaxis1'].update({
'ticktext': [t.get_text() for t in y_ticks],
'tickvals': [t.get_position()[1] for t in y_ticks],
})
except Exception:
pass
return deepcopy(plotly_renderer.plotly_fig)
plotly_fig = our_mpl_to_plotly(mpl_fig)
# noinspection PyBroadException
try:
if 'none' in templates:
plotly_fig._layout_obj.template = templates['none']
except Exception:
pass
except Exception as ex:
# this was an image, change format to png
image_format = 'jpeg' if 'selfie' in str(ex) else 'png'
fig_dpi = 300
finally:
PatchedMatplotlib._lock_renderer.release()
# plotly could not serialize the plot, we should convert to image
if not plotly_fig:
plotly_fig = None
# noinspection PyBroadException
try:
# first try SVG if we fail then fallback to png
buffer_ = BytesIO()
a_plt = specific_fig if specific_fig is not None else plt
if PatchedMatplotlib._matplot_major_version < 3:
a_plt.savefig(buffer_, dpi=fig_dpi, format=image_format, bbox_inches='tight', pad_inches=0,
frameon=False)
else:
a_plt.savefig(buffer_, dpi=fig_dpi, format=image_format, bbox_inches='tight', pad_inches=0,
facecolor=None)
buffer_.seek(0)
except Exception:
image_format = 'png'
buffer_ = BytesIO()
a_plt = specific_fig if specific_fig is not None else plt
if PatchedMatplotlib._matplot_major_version < 3:
a_plt.savefig(buffer_, dpi=fig_dpi, format=image_format, bbox_inches='tight', pad_inches=0,
frameon=False)
else:
a_plt.savefig(buffer_, dpi=fig_dpi, format=image_format, bbox_inches='tight', pad_inches=0,
facecolor=None)
buffer_.seek(0)
fd, image = mkstemp(suffix='.' + image_format)
os.write(fd, buffer_.read())
os.close(fd)
# check if we need to restore the active object
if set_active and not _pylab_helpers.Gcf.get_active():
_pylab_helpers.Gcf.set_active(stored_figure)
# get the main task
reporter = PatchedMatplotlib._current_task.reporter
if reporter is not None:
if mpl_fig.texts:
plot_title = mpl_fig.texts[0].get_text()
else:
gca = mpl_fig.gca()
plot_title = gca.title.get_text() if gca.title else None
# remove borders and size, we should let the web take care of that
if plotly_fig:
last_iteration = PatchedMatplotlib._get_last_iteration()
if plot_title:
title = PatchedMatplotlib._enforce_unique_title_per_iteration(plot_title, last_iteration)
else:
PatchedMatplotlib._global_plot_counter += 1
title = 'untitled %02d' % PatchedMatplotlib._global_plot_counter
plotly_fig.layout.margin = {}
plotly_fig.layout.autosize = True
plotly_fig.layout.height = None
plotly_fig.layout.width = None
# send the plot event
plotly_dict = plotly_fig.to_plotly_json()
if not plotly_dict.get('layout'):
plotly_dict['layout'] = {}
plotly_dict['layout']['title'] = title
PatchedMatplotlib._matplotlib_reported_titles.add(title)
reporter.report_plot(title=title, series='plot', plot=plotly_dict, iter=last_iteration)
else:
logger = PatchedMatplotlib._current_task.get_logger()
# this is actually a failed plot, we should put it under plots:
# currently disabled
if force_save_as_image or not PatchedMatplotlib._support_image_plot:
last_iteration = PatchedMatplotlib._get_last_iteration()
# send the plot as image
if plot_title:
title = PatchedMatplotlib._enforce_unique_title_per_iteration(plot_title, last_iteration)
else:
PatchedMatplotlib._global_image_counter += 1
title = 'untitled %02d' % (PatchedMatplotlib._global_image_counter %
PatchedMatplotlib._global_image_counter_limit)
PatchedMatplotlib._matplotlib_reported_titles.add(title)
logger.report_image(title=title, series='plot image', local_path=image,
delete_after_upload=True, iteration=last_iteration)
else:
# send the plot as plotly with embedded image
last_iteration = PatchedMatplotlib._get_last_iteration()
if plot_title:
title = PatchedMatplotlib._enforce_unique_title_per_iteration(plot_title, last_iteration)
else:
PatchedMatplotlib._global_plot_counter += 1
title = 'untitled %02d' % (PatchedMatplotlib._global_plot_counter %
PatchedMatplotlib._global_image_counter_limit)
PatchedMatplotlib._matplotlib_reported_titles.add(title)
# noinspection PyProtectedMember
logger._report_image_plot_and_upload(
title=title, series='plot image', path=image,
delete_after_upload=True, iteration=last_iteration)
except Exception:
# plotly failed
pass
return
@staticmethod
def _enforce_unique_title_per_iteration(title, last_iteration):
# type: (str, int) -> str
"""
        A given matplotlib title resets its counter on every new iteration.
        Reporting the same title twice within one iteration produces "title" and "title/1";
        on the next iteration the counter restarts from "title".
:param title: original matplotlib title
:param last_iteration: the current "last_iteration"
:return: new title to use (with counter attached if necessary)
"""
# check if we already encountered the title
if title in PatchedMatplotlib._last_iteration_plot_titles:
# if we have check the last iteration
title_last_iteration, title_counter = PatchedMatplotlib._last_iteration_plot_titles[title]
            # if this is the same iteration, increment the per-title counter
if last_iteration == title_last_iteration:
title_counter += 1
else: # if this is a new iteration start from the beginning
title_last_iteration = last_iteration
title_counter = 0
else:
# this is a new title
title_last_iteration = last_iteration
title_counter = 0
base_title = title
        # if the counter is zero, do not append it to the title
if title_counter != 0:
title = base_title + '/%d' % title_counter
# update back the title iteration counter
PatchedMatplotlib._last_iteration_plot_titles[base_title] = (title_last_iteration, title_counter)
return title
@staticmethod
def _get_output_figures(stored_figure, all_figures):
try:
from matplotlib import _pylab_helpers
if all_figures:
return list(_pylab_helpers.Gcf.figs.values())
else:
return [stored_figure] or [_pylab_helpers.Gcf.get_active()]
except Exception:
return []
@staticmethod
def __patched_draw_all(*args, **kwargs):
recursion_guard = PatchedMatplotlib.__patched_draw_all_recursion_guard
if not recursion_guard:
PatchedMatplotlib.__patched_draw_all_recursion_guard = True
ret = PatchedMatplotlib.__patched_original_draw_all(*args, **kwargs)
if not recursion_guard:
PatchedMatplotlib.ipython_post_execute_hook()
PatchedMatplotlib.__patched_draw_all_recursion_guard = False
return ret
@staticmethod
def _get_last_iteration():
if PatchedMatplotlib._logger_started_reporting:
return PatchedMatplotlib._current_task.get_last_iteration()
# get the reported plot titles (exclude us)
reported_titles = ResourceMonitor.get_logger_reported_titles(PatchedMatplotlib._current_task)
if not reported_titles:
return 0
# check that this is not only us
if not (set(reported_titles) - PatchedMatplotlib._matplotlib_reported_titles):
return 0
# mark reporting started
PatchedMatplotlib._logger_started_reporting = True
return PatchedMatplotlib._current_task.get_last_iteration()
@staticmethod
def ipython_post_execute_hook():
# noinspection PyBroadException
try:
from matplotlib import _pylab_helpers
for i, f_mgr in enumerate(_pylab_helpers.Gcf.get_all_fig_managers()):
if not f_mgr.canvas.figure.stale:
PatchedMatplotlib._report_figure(stored_figure=f_mgr)
except Exception:
pass
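# Hedged usage sketch (not part of the original module): the patch is normally
# wired up by the Task machinery, roughly as below, where `task` stands for an
# already-initialized Task object.
#
#     PatchedMatplotlib.update_current_task(task)
#     # ... subsequent plt.show() / savefig() calls are reported to the task.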
|
the-stack_0_10339 | # -*- coding:utf-8 -*-
# There are two sorted arrays nums1 and nums2 of size m and n respectively.
#
# Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
#
# Example 1:
#
# nums1 = [1, 3]
# nums2 = [2]
#
# The median is 2.0
#
#
#
# Example 2:
#
# nums1 = [1, 2]
# nums2 = [3, 4]
#
# The median is (2 + 3)/2 = 2.5
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
nums = sorted(nums1 + nums2)
t_len = len(nums)
if t_len == 1:
return nums[0]
        if t_len % 2:
            return nums[t_len // 2]
        else:
            return (nums[t_len // 2] + nums[t_len // 2 - 1]) / 2.0
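# The class above sorts the concatenated arrays, which costs O((m+n) log(m+n))
# rather than the O(log(m+n)) asked for in the problem statement. Below is a
# hedged, illustrative sketch of the usual partition-based binary search that
# meets that bound; it is not part of the original submission.
def find_median_sorted_arrays_log(a, b):
    # Always binary-search over the shorter array.
    if len(a) > len(b):
        a, b = b, a
    m, n = len(a), len(b)
    half = (m + n + 1) // 2
    lo, hi = 0, m
    while lo <= hi:
        i = (lo + hi) // 2  # number of elements taken from a's left part
        j = half - i        # number of elements taken from b's left part
        a_left = a[i - 1] if i > 0 else float('-inf')
        a_right = a[i] if i < m else float('inf')
        b_left = b[j - 1] if j > 0 else float('-inf')
        b_right = b[j] if j < n else float('inf')
        if a_left <= b_right and b_left <= a_right:
            if (m + n) % 2:
                return float(max(a_left, b_left))
            return (max(a_left, b_left) + min(a_right, b_right)) / 2.0
        elif a_left > b_right:
            hi = i - 1
        else:
            lo = i + 1
    raise ValueError("input arrays must be sorted")


if __name__ == "__main__":
    # Quick check against the examples in the header comment.
    print(Solution().findMedianSortedArrays([1, 3], [2]))    # -> 2
    print(find_median_sorted_arrays_log([1, 2], [3, 4]))     # -> 2.5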
|
the-stack_0_10340 | #! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from itertools import product
from unittest import mock
import torch
from botorch.exceptions.errors import BotorchError
from botorch.utils.multi_objective.box_decompositions.box_decomposition import (
BoxDecomposition,
FastPartitioning,
)
from botorch.utils.multi_objective.box_decompositions.utils import (
update_local_upper_bounds_incremental,
)
from botorch.utils.testing import BotorchTestCase
class DummyBoxDecomposition(BoxDecomposition):
def _partition_space(self):
pass
def compute_hypervolume(self):
pass
def get_hypercell_bounds(self):
pass
class DummyFastPartitioning(FastPartitioning, DummyBoxDecomposition):
def _get_partitioning(self):
pass
def _get_single_cell(self):
pass
class TestBoxDecomposition(BotorchTestCase):
def setUp(self):
self.ref_point_raw = torch.zeros(3, device=self.device)
self.Y_raw = torch.tensor(
[
[1.0, 2.0, 1.0],
[1.0, 1.0, 1.0],
[2.0, 0.5, 1.0],
],
device=self.device,
)
self.pareto_Y_raw = torch.tensor(
[
[1.0, 2.0, 1.0],
[2.0, 0.5, 1.0],
],
device=self.device,
)
def test_box_decomposition(self):
with self.assertRaises(TypeError):
BoxDecomposition()
for dtype, m, sort in product(
(torch.float, torch.double), (2, 3), (True, False)
):
with mock.patch.object(
DummyBoxDecomposition,
"_partition_space_2d" if m == 2 else "_partition_space",
) as mock_partition_space:
ref_point = self.ref_point_raw[:m].to(dtype=dtype)
Y = self.Y_raw[:, :m].to(dtype=dtype)
pareto_Y = self.pareto_Y_raw[:, :m].to(dtype=dtype)
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort)
# test pareto_Y before it is initialized
with self.assertRaises(BotorchError):
bd.pareto_Y
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort, Y=Y)
mock_partition_space.assert_called_once()
# test attributes
expected_pareto_Y = (
pareto_Y[torch.argsort(-pareto_Y[:, 0])] if sort else pareto_Y
)
self.assertTrue(torch.equal(bd.pareto_Y, expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, Y))
self.assertTrue(torch.equal(bd._neg_Y, -Y))
self.assertTrue(torch.equal(bd._neg_pareto_Y, -expected_pareto_Y))
self.assertTrue(torch.equal(bd.ref_point, ref_point))
self.assertTrue(torch.equal(bd._neg_ref_point, -ref_point))
self.assertEqual(bd.num_outcomes, m)
# test empty Y
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort, Y=Y[:0])
self.assertTrue(torch.equal(bd.pareto_Y, expected_pareto_Y[:0]))
# test _update_neg_Y
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort)
bd._update_neg_Y(Y[:2])
self.assertTrue(torch.equal(bd._neg_Y, -Y[:2]))
bd._update_neg_Y(Y[2:])
self.assertTrue(torch.equal(bd._neg_Y, -Y))
# test batch mode
if m == 2:
batch_Y = torch.stack([Y, Y + 1], dim=0)
bd = DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=batch_Y
)
batch_expected_pareto_Y = torch.stack(
[expected_pareto_Y, expected_pareto_Y + 1], dim=0
)
self.assertTrue(torch.equal(bd.pareto_Y, batch_expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, batch_Y))
self.assertTrue(torch.equal(bd.ref_point, ref_point))
# test batch ref point
batch_ref_point = torch.stack([ref_point, ref_point + 1], dim=0)
bd = DummyBoxDecomposition(
ref_point=batch_ref_point, sort=sort, Y=batch_Y
)
self.assertTrue(torch.equal(bd.ref_point, batch_ref_point))
# test multiple batch dims
with self.assertRaises(NotImplementedError):
DummyBoxDecomposition(
ref_point=ref_point,
sort=sort,
Y=batch_Y.unsqueeze(0),
)
# test empty Y
bd = DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=batch_Y[:, :0]
)
self.assertTrue(
torch.equal(bd.pareto_Y, batch_expected_pareto_Y[:, :0])
)
# test padded pareto frontiers with different numbers of
# points
batch_Y[1, 1] = batch_Y[1, 0] - 1
batch_Y[1, 2] = batch_Y[1, 0] - 2
bd = DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=batch_Y
)
batch_expected_pareto_Y = torch.stack(
[
expected_pareto_Y,
batch_Y[1, :1].expand(expected_pareto_Y.shape),
],
dim=0,
)
self.assertTrue(torch.equal(bd.pareto_Y, batch_expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, batch_Y))
else:
with self.assertRaises(NotImplementedError):
DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=Y.unsqueeze(0)
)
def test_fast_partitioning(self):
with self.assertRaises(TypeError):
FastPartitioning()
for dtype, m in product(
(torch.float, torch.double),
(2, 3),
):
ref_point = self.ref_point_raw[:m].to(dtype=dtype)
Y = self.Y_raw[:, :m].to(dtype=dtype)
pareto_Y = self.pareto_Y_raw[:, :m].to(dtype=dtype)
sort = m == 2
expected_pareto_Y = (
pareto_Y[torch.argsort(-pareto_Y[:, 0])] if sort else pareto_Y
)
bd = DummyFastPartitioning(ref_point=ref_point, Y=Y)
self.assertTrue(torch.equal(bd.pareto_Y, expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, Y))
self.assertTrue(torch.equal(bd._neg_Y, -Y))
self.assertTrue(torch.equal(bd._neg_pareto_Y, -expected_pareto_Y))
self.assertTrue(torch.equal(bd.ref_point, ref_point))
self.assertTrue(torch.equal(bd._neg_ref_point, -ref_point))
self.assertEqual(bd.num_outcomes, m)
# test update
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch.object(
DummyFastPartitioning,
"reset",
wraps=bd.reset,
) as mock_reset:
# with no existing neg_Y
bd.update(Y=Y[:2])
mock_reset.assert_called_once()
# test with existing Y
bd.update(Y=Y[2:])
# check that reset is only called when m=2
if m == 2:
mock_reset.assert_has_calls([mock.call(), mock.call()])
else:
mock_reset.assert_called_once()
# with existing neg_Y, and empty pareto_Y
bd = DummyFastPartitioning(ref_point=ref_point, Y=Y[:0])
with mock.patch.object(
DummyFastPartitioning,
"reset",
wraps=bd.reset,
) as mock_reset:
bd.update(Y=Y[0:])
mock_reset.assert_called_once()
# test empty pareto Y
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch.object(
DummyFastPartitioning,
"_get_single_cell",
wraps=bd._get_single_cell,
) as mock_get_single_cell:
bd.update(Y=Y[:0])
mock_get_single_cell.assert_called_once()
# test batched empty pareto Y
if m == 2:
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch.object(
DummyFastPartitioning,
"_get_single_cell",
wraps=bd._get_single_cell,
) as mock_get_single_cell:
bd.update(Y=Y.unsqueeze(0)[:, :0])
mock_get_single_cell.assert_called_once()
# test that update_local_upper_bounds_incremental is called when m>2
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch(
"botorch.utils.multi_objective.box_decompositions.box_decomposition."
"update_local_upper_bounds_incremental",
wraps=update_local_upper_bounds_incremental,
) as mock_update_local_upper_bounds_incremental, mock.patch.object(
DummyFastPartitioning,
"_get_partitioning",
wraps=bd._get_partitioning,
) as mock_get_partitioning, mock.patch.object(
DummyFastPartitioning,
"_partition_space_2d",
):
bd.update(Y=Y)
if m > 2:
mock_update_local_upper_bounds_incremental.assert_called_once()
# check that it is not called if the pareto set does not change
bd.update(Y=Y)
mock_update_local_upper_bounds_incremental.assert_called_once()
mock_get_partitioning.assert_called_once()
else:
self.assertEqual(
len(mock_update_local_upper_bounds_incremental.call_args_list),
0,
)
# test exception is raised for m=2, batched box decomposition using
# _partition_space
if m == 2:
with self.assertRaises(NotImplementedError):
DummyFastPartitioning(ref_point=ref_point, Y=Y.unsqueeze(0))
|
the-stack_0_10341 | """Exceptions raised by the dvc."""
from funcy import first
from dvc.utils import error_link, format_link, relpath
class DvcException(Exception):
"""Base class for all dvc exceptions."""
def __init__(self, msg, *args):
assert msg
super().__init__(msg, *args)
class InvalidArgumentError(ValueError, DvcException):
"""Thrown if arguments are invalid."""
class OutputDuplicationError(DvcException):
"""Thrown if a file/directory is specified as an output in more than one
stage.
Args:
output (unicode): path to the file/directory.
stages (list): list of paths to stages.
"""
def __init__(self, output, stages):
assert isinstance(output, str)
assert all(hasattr(stage, "relpath") for stage in stages)
if len(stages) == 1:
msg = "output '{}' is already specified in {}.".format(
output, first(stages)
)
else:
msg = "output '{}' is already specified in stages:\n{}".format(
output, "\n".join(f"\t- {s.addressing}" for s in stages),
)
super().__init__(msg)
self.stages = stages
self.output = output
class OutputNotFoundError(DvcException):
"""Thrown if a file/directory is not found as an output in any pipeline.
Args:
output (unicode): path to the file/directory.
"""
def __init__(self, output, repo=None):
self.output = output
self.repo = repo
super().__init__(
"Unable to find DVC-file with output '{path}'".format(
path=relpath(self.output)
)
)
class StagePathAsOutputError(DvcException):
"""Thrown if directory that stage is going to be saved in is specified as
an output of another stage.
Args:
stage (Stage): a stage that is in some other stages output
output (str): an output covering the stage above
"""
def __init__(self, stage, output):
assert isinstance(output, str)
super().__init__(
"{stage} is within an output '{output}' of another stage".format(
stage=stage, output=output
)
)
class CircularDependencyError(DvcException):
"""Thrown if a file/directory specified both as an output and as a
dependency.
Args:
dependency (str): path to the dependency.
"""
def __init__(self, dependency):
assert isinstance(dependency, str)
msg = "'{}' is specified as an output and as a dependency."
super().__init__(msg.format(dependency))
class ArgumentDuplicationError(DvcException):
"""Thrown if a file/directory is specified as a dependency/output more
than once.
Args:
path (str): path to the file/directory.
"""
def __init__(self, path):
assert isinstance(path, str)
super().__init__(f"file '{path}' is specified more than once.")
class MoveNotDataSourceError(DvcException):
"""Thrown when trying to move a file/directory that is not an output
in a data source stage.
Args:
path (str): path to the file/directory.
"""
def __init__(self, path):
msg = (
"move is not permitted for stages that are not data sources. "
"You need to either move '{path}' to a new location and edit "
"it by hand, or remove '{path}' and create a new one at the "
"desired location."
)
super().__init__(msg.format(path=path))
class NotDvcRepoError(DvcException):
"""Thrown if a directory is not a DVC repo"""
class DvcParserError(DvcException):
"""Base class for CLI parser errors."""
def __init__(self):
super().__init__("parser error")
class CyclicGraphError(DvcException):
def __init__(self, stages):
assert isinstance(stages, list)
msg = "Pipeline has a cycle involving: {}.".format(
", ".join(s.addressing for s in stages)
)
super().__init__(msg)
class ConfirmRemoveError(DvcException):
def __init__(self, path):
super().__init__(
"unable to remove '{}' without a confirmation. Use "
"`-f` to force.".format(path)
)
class InitError(DvcException):
pass
class ReproductionError(DvcException):
def __init__(self, dvc_file_name):
self.path = dvc_file_name
super().__init__(f"failed to reproduce '{dvc_file_name}'")
class BadMetricError(DvcException):
def __init__(self, paths):
super().__init__(
"the following metrics do not exist, "
"are not metrics files or are malformed: {paths}".format(
paths=", ".join(f"'{path}'" for path in paths)
)
)
class NoMetricsError(DvcException):
pass
class NoMetricsParsedError(NoMetricsError):
def __init__(self, command):
super().__init__(
f"Could not parse {command} files. Use `-v` option to see more "
"details."
)
class NoMetricsFoundError(NoMetricsError):
def __init__(self, command, run_options):
super().__init__(
f"No {command} files in this repository. "
f"Use `{run_options}` options for "
f"`dvc run` to mark stage outputs as {command}."
)
class RecursiveAddingWhileUsingFilename(DvcException):
def __init__(self):
super().__init__(
"cannot use `fname` with multiple targets or `-R|--recursive`"
)
class OverlappingOutputPathsError(DvcException):
def __init__(self, parent, overlapping_out, message):
self.parent = parent
self.overlapping_out = overlapping_out
super().__init__(message)
class CheckoutErrorSuggestGit(DvcException):
def __init__(self, target):
super().__init__(f"Did you mean `git checkout {target}`?")
class ETagMismatchError(DvcException):
def __init__(self, etag, cached_etag):
super().__init__(
"ETag mismatch detected when copying file to cache! "
"(expected: '{}', actual: '{}')".format(etag, cached_etag)
)
class FileMissingError(DvcException):
def __init__(self, path, hint=None):
self.path = path
hint = "" if hint is None else f". {hint}"
super().__init__(
f"Can't find '{path}' neither locally nor on remote{hint}"
)
class DvcIgnoreInCollectedDirError(DvcException):
def __init__(self, ignore_dirname):
super().__init__(
".dvcignore file should not be in collected dir path: "
"'{}'".format(ignore_dirname)
)
class GitHookAlreadyExistsError(DvcException):
def __init__(self, hook_name):
super().__init__(
"Hook '{}' already exists. Please refer to {} for more "
"info.".format(
hook_name, format_link("https://man.dvc.org/install")
)
)
class DownloadError(DvcException):
def __init__(self, amount):
self.amount = amount
super().__init__(f"{amount} files failed to download")
class UploadError(DvcException):
def __init__(self, amount):
self.amount = amount
super().__init__(f"{amount} files failed to upload")
class CheckoutError(DvcException):
def __init__(self, target_infos, stats=None):
self.target_infos = target_infos
self.stats = stats
targets = [str(t) for t in target_infos]
m = (
"Checkout failed for following targets:\n{}\nIs your "
"cache up to date?\n{}".format(
"\n".join(targets), error_link("missing-files"),
)
)
super().__init__(m)
class CollectCacheError(DvcException):
pass
class NoRemoteInExternalRepoError(DvcException):
def __init__(self, url):
super().__init__(
f"No DVC remote is specified in target repository '{url}'."
)
class NoOutputInExternalRepoError(DvcException):
def __init__(self, path, external_repo_path, external_repo_url):
super().__init__(
"Output '{}' not found in target repository '{}'".format(
relpath(path, external_repo_path), external_repo_url
)
)
class HTTPError(DvcException):
def __init__(self, code, reason):
super().__init__(f"'{code} {reason}'")
class PathMissingError(DvcException):
default_msg = (
"The path '{}' does not exist in the target repository '{}'"
" neither as a DVC output nor as a Git-tracked file."
)
    default_msg_dvc_only = (
        "The path '{}' does not exist in the target repository '{}'"
        " as a DVC output."
    )
def __init__(self, path, repo, dvc_only=False):
msg = self.default_msg if not dvc_only else self.default_msg_dvc_only
super().__init__(msg.format(path, repo))
self.dvc_only = dvc_only
class RemoteCacheRequiredError(DvcException):
def __init__(self, path_info):
super().__init__(
(
"Current operation was unsuccessful because '{}' requires "
"existing cache on '{}' remote. See {} for information on how "
"to set up remote cache."
).format(
path_info,
path_info.scheme,
format_link("https://man.dvc.org/config#cache"),
)
)
class IsADirectoryError(DvcException): # noqa,pylint:disable=redefined-builtin
"""Raised when a file operation is requested on a directory."""
class NoOutputOrStageError(DvcException):
"""
Raised when the target is neither an output nor a stage name in dvc.yaml
"""
def __init__(self, target, file):
super().__init__(
f"'{target}' "
f"does not exist as an output or a stage name in '{file}'"
)
class MergeError(DvcException):
pass
class CacheLinkError(DvcException):
SUPPORT_LINK = "See {} for more information.".format(
format_link(
"https://dvc.org/doc/user-guide/troubleshooting#cache-types"
)
)
def __init__(self, path_infos):
msg = "No possible cache link types for '{}'. {}".format(
", ".join([str(path) for path in path_infos]), self.SUPPORT_LINK,
)
super().__init__(msg)
self.path_infos = path_infos
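# Hedged usage sketch (not part of dvc itself): callers typically catch the
# DvcException base class so that every dvc-specific failure defined above is
# reported uniformly. `fake_operation` is a hypothetical stand-in for a real
# repo call.
if __name__ == "__main__":
    def fake_operation():
        raise RecursiveAddingWhileUsingFilename()

    try:
        fake_operation()
    except DvcException as exc:
        print("ERROR: {}".format(exc))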
|
the-stack_0_10342 | import sys
sys.path.append('..')
from inner import class_inner
class EarthPoint :
latitude : float
longitude : float
def __init__(self,latitude, longitude):
self.latitude = latitude
self.longitude = longitude
def __str__(self):
fmt = self.formatter()
return fmt.as_str(self)
@class_inner
class formatter:
def as_str(self, v):
ns,ew = "NS"[v.latitude<0],"EW"[v.longitude<0]
return f"{abs(v.latitude):.4f}{ns} {abs(v.longitude):.4f}{ew}"
def _parse(self, s, card):
value,c = float(s[:-1]), s[-1].upper()
sign =(1,-1)[card.index(c)]
return sign*value
def from_str(self , geostr):
s = geostr.split()
if len(s)!=2:
raise ValueError("invalid string")
latitude = self._parse(s[0], "NS")
longitude = self._parse(s[1], "EW")
return self.outer(latitude, longitude)
# formatting
Paris = EarthPoint(48.866667, 2.333333)
print(str(Paris))
# parsing
fmt = EarthPoint.formatter()
geo = fmt.from_str('48.8667N 2.3333E')
print(geo)
|
the-stack_0_10343 | import collections
import signal
from django.template import Template, Context
from . import flamegraph
try:
from debug_toolbar.panels import Panel
except ImportError as e:
import os
if os.environ.get('TESTING'):
import mock
Panel = mock.Mock()
else:
raise e
template = r"""
<style>
#FlamegraphPanel .djDebugPanelContent { padding:0; }
</style>
<template id="djdt-flamegraph-tpl">
<style>
body {margin: 0;}
</style>
{{ flamegraph|safe }}
<script>
init();
</script>
</template>
<iframe id="djdt-flamegraph-iframe" style="width:100%;height:100%;">
</iframe>
"""
from django.templatetags.static import static
class FlamegraphPanel(Panel):
title = 'Flamegraph'
template = 'djdt_flamegraph.html'
@property
def enabled(self):
key = 'djdt' + self.panel_id
return self.toolbar.request.COOKIES.get(key, 'off') == 'on'
@property
def content(self):
return Template(template).render(Context({
'flamegraph': flamegraph.stats_to_svg(self.sampler.get_stats())
}))
@property
def scripts(self):
scripts = super().scripts
scripts.append(static("djdt_flamegraph/djdt_flamegraph.js"))
return scripts
def enable_instrumentation(self):
self.sampler = Sampler()
def process_request(self, request):
self.sampler.start()
response = super().process_request(request)
self.sampler.stop()
return response
class Sampler(object):
def __init__(self, interval=0.001):
self.stack_counts = collections.defaultdict(int)
self.interval = interval
def _sample(self, signum, frame):
stack = []
while frame is not None:
formatted_frame = '{}({})'.format(frame.f_code.co_name,
frame.f_globals.get('__name__'))
stack.append(formatted_frame)
frame = frame.f_back
formatted_stack = ';'.join(reversed(stack))
self.stack_counts[formatted_stack] += 1
def get_stats(self):
return '\n'.join('%s %d' % (key, value) for key, value in sorted(self.stack_counts.items()))
def start(self):
signal.signal(signal.SIGALRM, self._sample)
signal.setitimer(signal.ITIMER_REAL, self.interval, self.interval)
def stop(self):
signal.setitimer(signal.ITIMER_REAL, 0, 0)
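# Hedged usage sketch (not part of the original panel): the Sampler above only
# needs the standard library, so it can also profile an arbitrary code block,
# assuming this module's Django/debug_toolbar imports resolve. SIGALRM-based
# sampling only works on Unix and only in the main thread.
if __name__ == "__main__":
    import time

    sampler = Sampler(interval=0.001)
    sampler.start()
    sum(i * i for i in range(10 ** 6))  # some work to sample
    time.sleep(0.01)
    sampler.stop()
    # get_stats() returns "frame;frame;... count" lines, the folded-stack
    # format consumed by flamegraph.stats_to_svg above.
    print(sampler.get_stats())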
|
the-stack_0_10346 | """
# This script searches for the threshold of the SparNet model by computing accuracy.
# It also computes FLOPs for the SparNet model.
#
# See Table 3 in the main paper.
#
Example usage:
CUDA_VISIBLE_DEVICES=1 python3 -m evaluation.seach_sparnet_th --settings_file config/settings_ncaltech.yaml
CUDA_VISIBLE_DEVICES=1 python3 -m evaluation.seach_sparnet_th --settings_file config/settings_prophesee.yaml
CUDA_VISIBLE_DEVICES=0 python3 -m evaluation.seach_sparnet_th --settings_file config/settings_exp.yaml
"""
from config.settings import Settings
import numpy as np
import argparse
from training.object_cls_trainer import DSSClsModel
from training.object_det_trainer import DSSDetModel
from training.exp_trainer import ExpModel
from utils.log_utils import loadCheckpoint
if 0:
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
def main():
parser = argparse.ArgumentParser(description='Train network.')
parser.add_argument('--settings_file', help='Path to settings yaml', required=False)
args = parser.parse_args()
settings_filepath = args.settings_file
settings = Settings(settings_filepath, generate_log=False)
# settings.batch_size=1
th = [0, 0.02, 0.04, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    print('Start evaluating thr-acc-flops relations of SparNet model on %s' % settings.dataset_name)
# Build trainer
if settings.model_name == 'dss_cls':
trainer = DSSClsModel(settings)
elif settings.model_name == 'dss_det':
trainer = DSSDetModel(settings)
elif settings.model_name == 'dss_exp':
trainer = ExpModel(settings)
else:
raise ValueError('Model name %s specified in the settings file is not implemented' % settings.model_name)
loadCheckpoint(trainer.model, trainer.settings.resume_ckpt_file)
trainer.model.set_train_mode((True, True, True, True))
for th_ in th:
# trainer.model.set_train_mode((False, False, True, True))
trainer.model.set_thr(th_)
if settings.dataset_name=='NMNIST':
trainer.testEpoch()
            print('NMNIST, %s threshold: %.6f, trg_loss: %.6f, acc: %.6f, test_mac: %.6f' % (settings.dataset_name, th_, trainer.test_tgt, trainer.test_acc, trainer.test_mac))
else:
trainer.validationEpoch()
            print('%s threshold: %.6f, trg_loss: %.6f, acc: %.6f, test_mac: %.6f' % (settings.dataset_name, th_, trainer.val_tgt, trainer.val_acc, trainer.val_mac))
if __name__ == "__main__":
main()
|
the-stack_0_10347 | import code
import os
import sys
import wx
from app.app_utils import Chronometer
from app.app_utils import GripyBitmap
from classes.ui import UIManager
from classes.ui import WorkPageController
from classes.ui import WorkPage
# TODO: rever isso... replicado em WellPlot
WP_FLOAT_PANEL = wx.NewId()
class ConsoleController(WorkPageController):
tid = 'console_controller'
_ATTRIBUTES = {
}
def __init__(self, **state):
super().__init__(**state)
class InteractiveConsole(code.InteractiveConsole):
def __init__(self, outputFunc, flushFunc, setPromptFunc, exitCmd, clearFunc, echoFunc=None):
self._output = outputFunc
self._flush = flushFunc
self._echo = echoFunc
self._setPrompt = setPromptFunc
self._exitCmd = exitCmd
self._clearFunc = clearFunc
        # Under Python 2, code.InteractiveConsole was an old-style class, so
        # super() could not be used and the base __init__ had to be called directly:
        # code.InteractiveConsole.__init__(self)  # , locals=self.namespace)
        # Under Python 3 it is a new-style class, so the super() call below works.
super().__init__(locals=None) # , filename=self._output) # locals=None, filename="<console>"
self.prompt = ">>>"
def _set_prompt(self, prompt):
self._prompt = prompt
self._setPrompt(prompt)
def _get_prompt(self):
return self._prompt
def write(self, data):
self._output(data)
def _show_error_info(self, exectype, value, tb):
msg = '\nError found! \nError type: ' + exectype.__name__ \
+ '\nDescription: ' + str(value) + '\n'
# print('Traceback:', tb)
self.write(msg)
def push(self, data):
lines = data.split('\n')
if self._echo:
for line in lines:
self._echo("%s %s\n" % (self.prompt, line))
c = Chronometer()
# Capture stdout/stderr output as well as code interaction.
stdout, stderr = sys.stdout, sys.stderr
temp_excepthook = sys.excepthook
sys.excepthook = self._show_error_info
#
sys.stdout = sys.stderr = self
for line in lines:
# more = code.InteractiveConsole.push(self, line)
more = super().push(line)
self.prompt = "..." if more else ">>>"
#
if self._echo:
self._echo("%s \n\n" % (c.end()))
#
sys.excepthook = temp_excepthook
sys.stdout, sys.stderr = stdout, stderr
def flush(self):
self._flush()
class Console(WorkPage):
tid = 'console'
_TID_FRIENDLY_NAME = 'Coding Console'
def __init__(self, controller_uid):
super().__init__(controller_uid)
# Top
self.sizer = wx.BoxSizer(wx.VERTICAL)
self._tool_bar = wx.aui.AuiToolBar(self)
self.sizer.Add(self._tool_bar, 0, flag=wx.TOP | wx.EXPAND)
# Center
self._main_panel = wx.Panel(self)
self.sizer.Add(self._main_panel, 1, flag=wx.EXPAND)
#
self.SetSizer(self.sizer)
# Then, let's construct our ToolBar
self._build_tool_bar()
# super(DebugConsoleFrame, self).__init__(parent,
# wx.ID_ANY,
# 'GRIPy Python Debug Console'
# )
# self.Bind(wx.EVT_ACTIVATE, self.onActivate)
# self.sizer = wx.BoxSizer(wx.VERTICAL)
# self._main_panel = wx.Panel(self)
# self.sizer.Add(self._main_panel, 1, flag=wx.EXPAND)
main_panel_sizer = wx.BoxSizer(wx.VERTICAL)
top_panel = wx.Panel(self._main_panel, -1)
font = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Consolas')
self.outputCtrl = wx.TextCtrl(top_panel, wx.ID_ANY,
style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH2
)
self.outputCtrl.Bind(wx.EVT_KEY_DOWN, self.onOutputKeyDown)
self.outputCtrl.Bind(wx.EVT_CHAR, self.onOutputChar)
output_attr = wx.TextAttr(wx.Colour(255, 0, 0), font=font)
self.outputCtrl.SetDefaultStyle(output_attr)
#
self.inputCtrl = wx.TextCtrl(top_panel, wx.ID_ANY,
style=wx.TE_RICH2 | wx.TE_MULTILINE | wx.TE_DONTWRAP | wx.TE_PROCESS_TAB
)
self.inputCtrl.Bind(wx.EVT_CHAR, self.onInputChar)
self.inputCtrl.SetFont(font)
#
top_sizer = wx.BoxSizer(wx.HORIZONTAL)
top_sizer.Add(self.inputCtrl, proportion=4, flag=wx.EXPAND)
top_sizer.Add(self.outputCtrl, proportion=4, flag=wx.EXPAND)
top_panel.SetSizer(top_sizer)
bottom_panel = wx.Panel(self._main_panel, -1)
### Begin - buttons_panel
buttons_panel = wx.Panel(bottom_panel)
self.clear_input_button = wx.Button(buttons_panel,
label='Clear input'
)
self.clear_input_button.Bind(wx.EVT_BUTTON, self.onClearInput)
self.clear_output_button = wx.Button(buttons_panel,
label='Clear output'
)
self.clear_output_button.Bind(wx.EVT_BUTTON, self.onClearOutput)
self.clear_all_button = wx.Button(buttons_panel,
label='Clear all'
)
self.clear_all_button.Bind(wx.EVT_BUTTON, self.onClearAll)
self.execute_button_selected = wx.Button(buttons_panel,
label='Excecute selected'
)
self.execute_button_selected.Bind(wx.EVT_BUTTON,
self.onExecuteSelected
)
self.execute_button_all = wx.Button(buttons_panel,
label='Excecute all'
)
self.execute_button_all.Bind(wx.EVT_BUTTON,
self.onExecuteAll
)
self.load_button = wx.Button(buttons_panel,
label='Load'
)
self.load_button.Bind(wx.EVT_BUTTON,
self.onLoadFile
)
self.save_button = wx.Button(buttons_panel,
label='Save'
)
self.save_button.Bind(wx.EVT_BUTTON,
self.onSaveFile
)
self.save_button_as = wx.Button(buttons_panel,
label='Save as'
)
self.save_button_as.Bind(wx.EVT_BUTTON,
self.onSaveFileAs
)
buttons_panel_sizer = wx.BoxSizer(wx.HORIZONTAL)
buttons_panel_sizer.Add(self.clear_input_button,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.clear_output_button,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.clear_all_button,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.execute_button_selected,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.execute_button_all,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.load_button,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.save_button,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.save_button_as,
0,
wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT,
10
)
buttons_panel.SetSizer(buttons_panel_sizer)
buttons_panel.Layout()
### End - buttons_panel
bottom_panel_sizer = wx.BoxSizer(wx.VERTICAL)
bottom_panel_sizer.Add(buttons_panel, 1, wx.ALIGN_CENTER | wx.ALL, 2)
bottom_panel.SetSizer(bottom_panel_sizer)
bottom_panel.Layout()
main_panel_sizer.Add(top_panel, 1, wx.EXPAND)
bottom_panel.SetMinSize((40, 40))
main_panel_sizer.Add(bottom_panel, 0, wx.EXPAND)
#
self._main_panel.SetSizer(main_panel_sizer)
self.console = InteractiveConsole(outputFunc=self.output,
flushFunc=self.flush,
exitCmd=self.Close,
clearFunc=self.clearOutput,
echoFunc=self.echo,
setPromptFunc=self.setPrompt
)
# main_panel_sizer.Layout()
self.Layout()
# self.SetSize((1350,700))
# self.SetPosition((10,10))
#
self.Bind(wx.EVT_CLOSE, self.onClose)
#
gripy_app = wx.GetApp()
_fullfilename = gripy_app._gripy_app_state.get('gripy_debug_file')
_fullfilename = os.path.normpath(_fullfilename)
self.file_name = os.path.basename(_fullfilename)
self.dir_name = os.path.dirname(_fullfilename)
#
if not os.path.isdir(self.dir_name):
os.makedirs(self.dir_name)
msg = 'DebugConsoleFrame.__init__ has created directory: {}'.format(self.dir_name)
# log.debug(msg)
# print(msg)
if not os.path.isfile(_fullfilename):
open(_fullfilename, 'a').close()
msg = 'DebugConsoleFrame.__init__ has created empty file: {}'.format(_fullfilename)
# log.debug(msg)
# print (msg)
if self.file_name and self.dir_name:
self._load_file()
def get_friendly_name(self):
idx = self._get_sequence_number()
name = self._get_tid_friendly_name() \
+ ': ' + '[' + str(idx) + ']'
return name
def _build_tool_bar(self):
self.fp_item = self._tool_bar.AddTool(WP_FLOAT_PANEL,
wx.EmptyString,
GripyBitmap('restore_window-25.png'),
wx.NullBitmap,
wx.ITEM_CHECK,
'Float Panel',
'Float Panel',
None
)
self._tool_bar.ToggleTool(WP_FLOAT_PANEL, False)
self._tool_bar.Bind(wx.EVT_TOOL, self._on_change_float_panel, None,
WP_FLOAT_PANEL
)
self._tool_bar.AddSeparator()
self._tool_bar.Realize()
#
def _on_change_float_panel(self, event):
# TODO: Integrar binds de toggle buttons...
if event.GetId() == WP_FLOAT_PANEL:
UIM = UIManager()
controller = UIM.get(self._controller_uid)
controller.float_mode = event.IsChecked()
def onLoadFile(self, evt):
style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
wildcard = "Arquivo de console GRIPy (*.gripy_console)|*.gripy_console"
fdlg = wx.FileDialog(self, 'Escolha o arquivo gripy_console',
defaultDir=self.dir_name,
wildcard=wildcard,
style=style
)
if fdlg.ShowModal() == wx.ID_OK:
self.file_name = fdlg.GetFilename()
self.dir_name = fdlg.GetDirectory()
self._load_file()
fdlg.Destroy()
def _load_file(self):
self.inputCtrl.LoadFile(os.path.join(self.dir_name, self.file_name))
def onSaveFileAs(self, evt):
style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
wildcard = "Arquivo de console GRIPy (*.gripy_console)|*.gripy_console"
fdlg = wx.FileDialog(self, 'Escolha o arquivo gripy_console',
defaultDir=self.dir_name,
wildcard=wildcard,
style=style
)
if fdlg.ShowModal() == wx.ID_OK:
self.file_name = fdlg.GetFilename()
self.dir_name = fdlg.GetDirectory()
self._do_save()
fdlg.Destroy()
def onSaveFile(self, evt):
self._do_save()
def _do_save(self):
self.inputCtrl.SaveFile(os.path.join(self.dir_name, self.file_name))
def onExecuteAll(self, evt):
data = self.inputCtrl.GetValue()
data = data + '\n'
self.console.push(data)
def onExecuteSelected(self, evt):
data = self.inputCtrl.GetStringSelection()
data = data + '\n'
self.console.push(data)
def onClearInput(self, evt):
self.clearInput()
def onClearOutput(self, evt):
self.clearOutput()
def onClearAll(self, evt):
self.clearInput()
self.clearOutput()
def onActivate(self, evt):
if evt.GetActive():
self.inputCtrl.SetFocus()
evt.Skip()
def onClose(self, evt):
self._do_save()
evt.Skip()
print('\n\nonClose')
def output(self, data):
self.outputCtrl.WriteText(data)
def flush(self):
self.outputCtrl.flush()
def echo(self, data):
self.outputCtrl.WriteText(data)
def setPrompt(self, prompt):
self.promptLabel.SetLabel(prompt)
def onInputChar(self, evt):
key = evt.GetKeyCode()
if key == wx.WXK_TAB:
data = self.inputCtrl.GetValue()
ins_point = self.inputCtrl.GetInsertionPoint()
last_point = self.inputCtrl.GetLastPosition()
line_number = len(data[0:ins_point].split("\n"))
if line_number > 1:
ins_point -= line_number - 1
data = data[0:ins_point] + ' ' + data[ins_point:last_point]
self.inputCtrl.ChangeValue(data)
self.inputCtrl.SetInsertionPoint(ins_point + 3 + line_number)
return
elif key == wx.WXK_F6:
self.outputCtrl.SetFocus()
return
elif key == wx.WXK_ESCAPE:
self.Close()
return
evt.Skip()
def clearOutput(self):
self.outputCtrl.ChangeValue("")
def clearInput(self):
self.inputCtrl.ChangeValue("")
def onOutputKeyDown(self, evt):
key = evt.GetKeyCode()
# #3763: WX 3 no longer passes escape to evt_char for richEdit fields, therefore evt_key_down is used.
if key == wx.WXK_ESCAPE:
self.Close()
return
evt.Skip()
def onOutputChar(self, evt):
key = evt.GetKeyCode()
if key == wx.WXK_F6:
self.inputCtrl.SetFocus()
return
evt.Skip()
|
the-stack_0_10348 | import copy
import itertools
import os
import tempfile
import unittest
import numpy as np
import pytest
import torch
from torch import nn
import pfrl
from pfrl.agents import ppo
from pfrl.agents.ppo import PPO
from pfrl.envs.abc import ABC
from pfrl.experiments import (
train_agent_batch_with_evaluation,
train_agent_with_evaluation,
)
from pfrl.experiments.evaluator import (
batch_run_evaluation_episodes,
run_evaluation_episodes,
)
from pfrl.nn import RecurrentBranched, RecurrentSequential
from pfrl.policies import (
GaussianHeadWithStateIndependentCovariance,
SoftmaxCategoricalHead,
)
from pfrl.testing import torch_assert_allclose
from pfrl.utils.batch_states import batch_states
make_random_episodes = ABC.make_random_episodes
class TestYieldSubsetOfSequencesWithFixedNumberOfItems(unittest.TestCase):
def test_manual(self):
episodes = [
[1, 2, 3],
[4, 5],
[6, 7, 8],
[9],
[10, 11, 12],
]
self.assertEqual(
list(
ppo._yield_subset_of_sequences_with_fixed_number_of_items(episodes, 4)
),
[
[[1, 2, 3], [4]],
[[5], [6, 7, 8]],
[[9], [10, 11, 12]],
],
)
self.assertEqual(
list(
ppo._yield_subset_of_sequences_with_fixed_number_of_items(episodes, 3)
),
[
[[1, 2, 3]],
[[4, 5], [6]],
[[7, 8], [9]],
[[10, 11, 12]],
],
)
self.assertEqual(
list(
ppo._yield_subset_of_sequences_with_fixed_number_of_items(episodes, 2)
),
[
[[1, 2]],
[[3], [4]],
[[5], [6]],
[[7, 8]],
[[9], [10]],
[[11, 12]],
],
)
class TestLimitSequenceLength(unittest.TestCase):
def test_manual(self):
episodes = [
[1, 2, 3],
[4, 5],
[6, 7, 8],
[9],
]
self.assertEqual(
ppo._limit_sequence_length(episodes, 1),
[[1], [2], [3], [4], [5], [6], [7], [8], [9]],
)
self.assertEqual(
ppo._limit_sequence_length(episodes, 2),
[
[1, 2],
[3],
[4, 5],
[6, 7],
[8],
[9],
],
)
self.assertEqual(
ppo._limit_sequence_length(episodes, 3),
episodes,
)
self.assertEqual(
ppo._limit_sequence_length(episodes, 4),
episodes,
)
def test_random(self):
episodes = make_random_episodes()
limit = 5
new_episodes = pfrl.agents.ppo._limit_sequence_length(episodes, limit)
for ep in new_episodes:
self.assertLessEqual(len(ep), limit)
# They should have the same number of transitions
self.assertEqual(
sum(len(ep) for ep in episodes), sum(len(ep) for ep in new_episodes)
)
@pytest.mark.parametrize("use_obs_normalizer", [True, False])
@pytest.mark.parametrize("gamma", [1, 0.8, 0])
@pytest.mark.parametrize("lambd", [1, 0.8, 0])
@pytest.mark.parametrize("max_recurrent_sequence_len", [None, 7])
def test_ppo_dataset_recurrent_and_non_recurrent_equivalence(
use_obs_normalizer, gamma, lambd, max_recurrent_sequence_len
):
"""Test equivalence between recurrent and non-recurrent datasets.
When the same feed-forward model is used, the values of
log_prob, v_pred, next_v_pred obtained by both recurrent and
non-recurrent dataset creation functions should be the same.
"""
episodes = make_random_episodes()
if use_obs_normalizer:
obs_normalizer = pfrl.nn.EmpiricalNormalization(2, clip_threshold=5)
obs_normalizer.experience(torch.rand(10, 2))
else:
obs_normalizer = None
def phi(obs):
return (obs * 0.5).astype(np.float32)
device = torch.device("cpu")
obs_size = 2
n_actions = 3
non_recurrent_model = pfrl.nn.Branched(
nn.Sequential(
nn.Linear(obs_size, n_actions),
SoftmaxCategoricalHead(),
),
nn.Linear(obs_size, 1),
)
recurrent_model = RecurrentSequential(
non_recurrent_model,
)
dataset = pfrl.agents.ppo._make_dataset(
episodes=copy.deepcopy(episodes),
model=non_recurrent_model,
phi=phi,
batch_states=batch_states,
obs_normalizer=obs_normalizer,
gamma=gamma,
lambd=lambd,
device=device,
)
dataset_recurrent = pfrl.agents.ppo._make_dataset_recurrent(
episodes=copy.deepcopy(episodes),
model=recurrent_model,
phi=phi,
batch_states=batch_states,
obs_normalizer=obs_normalizer,
gamma=gamma,
lambd=lambd,
max_recurrent_sequence_len=max_recurrent_sequence_len,
device=device,
)
assert "log_prob" not in episodes[0][0]
assert "log_prob" in dataset[0]
assert "log_prob" in dataset_recurrent[0][0]
# They are not just shallow copies
assert dataset[0]["log_prob"] is not dataset_recurrent[0][0]["log_prob"]
states = [tr["state"] for tr in dataset]
recurrent_states = [
tr["state"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(states, recurrent_states)
actions = [tr["action"] for tr in dataset]
recurrent_actions = [
tr["action"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(actions, recurrent_actions)
rewards = [tr["reward"] for tr in dataset]
recurrent_rewards = [
tr["reward"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(rewards, recurrent_rewards)
nonterminals = [tr["nonterminal"] for tr in dataset]
recurrent_nonterminals = [
tr["nonterminal"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(nonterminals, recurrent_nonterminals)
log_probs = [tr["log_prob"] for tr in dataset]
recurrent_log_probs = [
tr["log_prob"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(log_probs, recurrent_log_probs)
vs_pred = [tr["v_pred"] for tr in dataset]
recurrent_vs_pred = [
tr["v_pred"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(vs_pred, recurrent_vs_pred)
next_vs_pred = [tr["next_v_pred"] for tr in dataset]
recurrent_next_vs_pred = [
tr["next_v_pred"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(next_vs_pred, recurrent_next_vs_pred)
advs = [tr["adv"] for tr in dataset]
recurrent_advs = [
tr["adv"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(advs, recurrent_advs)
vs_teacher = [tr["v_teacher"] for tr in dataset]
recurrent_vs_teacher = [
tr["v_teacher"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(vs_teacher, recurrent_vs_teacher)
class _TestPPO:
@pytest.fixture(autouse=True)
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.agent_dirname = os.path.join(self.tmpdir, "agent_final")
@pytest.mark.slow
def test_abc_cpu(self):
self._test_abc()
self._test_abc(steps=0, load_model=True)
@pytest.mark.slow
@pytest.mark.gpu
def test_abc_gpu(self):
self._test_abc(gpu=0)
def test_abc_fast_cpu(self):
self._test_abc(steps=100, require_success=False)
self._test_abc(steps=0, require_success=False, load_model=True)
@pytest.mark.gpu
def test_abc_fast_gpu(self):
self._test_abc(steps=100, require_success=False, gpu=0)
@pytest.mark.slow
def test_abc_batch_cpu(self):
self._test_abc_batch()
self._test_abc_batch(steps=0, load_model=True)
@pytest.mark.slow
@pytest.mark.gpu
def test_abc_batch_gpu(self):
self._test_abc_batch(gpu=0)
def test_abc_batch_fast_cpu(self):
self._test_abc_batch(steps=100, require_success=False)
self._test_abc_batch(steps=0, require_success=False, load_model=True)
@pytest.mark.gpu
def test_abc_batch_fast_gpu(self):
self._test_abc_batch(steps=100, require_success=False, gpu=0)
def _test_abc(self, steps=100000, require_success=True, gpu=-1, load_model=False):
env, _ = self.make_env_and_successful_return(test=False)
test_env, successful_return = self.make_env_and_successful_return(test=True)
agent = self.make_agent(env, gpu)
max_episode_len = None if self.episodic else 2
if load_model:
print("Load agent from", self.agent_dirname)
agent.load(self.agent_dirname)
# Train
train_agent_with_evaluation(
agent=agent,
env=env,
steps=steps,
outdir=self.tmpdir,
eval_interval=200,
eval_n_steps=None,
eval_n_episodes=50,
successful_score=successful_return,
eval_env=test_env,
train_max_episode_len=max_episode_len,
)
# Test
n_test_runs = 10
eval_returns, _ = run_evaluation_episodes(
test_env,
agent,
n_steps=None,
n_episodes=n_test_runs,
max_episode_len=max_episode_len,
)
if require_success:
n_succeeded = np.sum(np.asarray(eval_returns) >= successful_return)
assert n_succeeded == n_test_runs
# Save
agent.save(self.agent_dirname)
def _test_abc_batch(
self, steps=100000, require_success=True, gpu=-1, load_model=False, num_envs=4
):
env, _ = self.make_vec_env_and_successful_return(test=False, num_envs=num_envs)
test_env, successful_return = self.make_vec_env_and_successful_return(
test=True, num_envs=num_envs
)
agent = self.make_agent(env, gpu)
max_episode_len = None if self.episodic else 2
if load_model:
print("Load agent from", self.agent_dirname)
agent.load(self.agent_dirname)
# Train
train_agent_batch_with_evaluation(
agent=agent,
env=env,
steps=steps,
outdir=self.tmpdir,
eval_interval=200,
eval_n_steps=None,
eval_n_episodes=40,
successful_score=successful_return,
eval_env=test_env,
log_interval=100,
max_episode_len=max_episode_len,
)
env.close()
# Test
n_test_runs = 10
eval_returns, _ = batch_run_evaluation_episodes(
test_env,
agent,
n_steps=None,
n_episodes=n_test_runs,
max_episode_len=max_episode_len,
)
test_env.close()
if require_success:
n_succeeded = np.sum(np.asarray(eval_returns) >= successful_return)
assert n_succeeded == n_test_runs
# Save
agent.save(self.agent_dirname)
def make_agent(self, env, gpu):
model = self.make_model(env)
opt = torch.optim.Adam(model.parameters(), lr=1e-2)
return self.make_ppo_agent(env=env, model=model, opt=opt, gpu=gpu)
def make_ppo_agent(self, env, model, opt, gpu):
return PPO(
model,
opt,
gpu=gpu,
gamma=0.8,
lambd=self.lambd,
update_interval=64,
minibatch_size=16,
epochs=3,
clip_eps_vf=self.clip_eps_vf,
standardize_advantages=self.standardize_advantages,
recurrent=self.recurrent,
entropy_coef=1e-5,
act_deterministically=True,
max_grad_norm=1.0,
)
def make_model(self, env):
hidden_size = 20
obs_size = env.observation_space.low.size
def weight_scale(layer, scale):
with torch.no_grad():
layer.weight.mul_(scale)
return layer
if self.recurrent:
v = RecurrentSequential(
nn.LSTM(num_layers=1, input_size=obs_size, hidden_size=hidden_size),
weight_scale(nn.Linear(hidden_size, 1), 1e-1),
)
if self.discrete:
n_actions = env.action_space.n
pi = RecurrentSequential(
nn.LSTM(num_layers=1, input_size=obs_size, hidden_size=hidden_size),
weight_scale(nn.Linear(hidden_size, n_actions), 1e-1),
SoftmaxCategoricalHead(),
)
else:
action_size = env.action_space.low.size
pi = RecurrentSequential(
nn.LSTM(num_layers=1, input_size=obs_size, hidden_size=hidden_size),
weight_scale(nn.Linear(hidden_size, action_size), 1e-1),
GaussianHeadWithStateIndependentCovariance(
action_size=action_size,
var_type="diagonal",
var_func=lambda x: torch.exp(2 * x),
var_param_init=0,
),
)
return RecurrentBranched(pi, v)
else:
v = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.Tanh(),
weight_scale(nn.Linear(hidden_size, 1), 1e-1),
)
if self.discrete:
n_actions = env.action_space.n
pi = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.Tanh(),
weight_scale(nn.Linear(hidden_size, n_actions), 1e-1),
SoftmaxCategoricalHead(),
)
else:
action_size = env.action_space.low.size
pi = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.Tanh(),
weight_scale(nn.Linear(hidden_size, action_size), 1e-1),
GaussianHeadWithStateIndependentCovariance(
action_size=action_size,
var_type="diagonal",
var_func=lambda x: torch.exp(2 * x),
var_param_init=0,
),
)
return pfrl.nn.Branched(pi, v)
def make_env_and_successful_return(self, test):
env = ABC(
discrete=self.discrete,
deterministic=test,
episodic=self.episodic,
partially_observable=self.recurrent,
)
return env, 1.0
def make_vec_env_and_successful_return(self, test, num_envs=3):
def make_env():
return self.make_env_and_successful_return(test)[0]
vec_env = pfrl.envs.MultiprocessVectorEnv([make_env for _ in range(num_envs)])
return vec_env, 1.0
@pytest.mark.parametrize("clip_eps_vf", [None, 0.2])
@pytest.mark.parametrize("lambd", [0.0, 0.5])
@pytest.mark.parametrize("discrete", [False, True])
@pytest.mark.parametrize("standardize_advantages", [False, True])
@pytest.mark.parametrize("episodic", [True, False])
class TestPPONonRecurrent(_TestPPO):
@pytest.fixture(autouse=True)
def set_params(
self,
clip_eps_vf,
lambd,
discrete,
standardize_advantages,
episodic,
):
self.clip_eps_vf = clip_eps_vf
self.lambd = lambd
self.discrete = discrete
self.standardize_advantages = standardize_advantages
self.episodic = episodic
self.recurrent = False
@pytest.mark.parametrize("clip_eps_vf", [0.2])
@pytest.mark.parametrize("lambd", [0.0, 0.5])
@pytest.mark.parametrize("discrete", [False, True])
@pytest.mark.parametrize("standardize_advantages", [True])
@pytest.mark.parametrize("episodic", [True, False])
class TestPPORecurrent(_TestPPO):
@pytest.fixture(autouse=True)
def set_params(
self,
clip_eps_vf,
lambd,
discrete,
standardize_advantages,
episodic,
):
self.clip_eps_vf = clip_eps_vf
self.lambd = lambd
self.discrete = discrete
self.standardize_advantages = standardize_advantages
self.episodic = episodic
self.recurrent = True
def test_yield_minibatches_divisible():
dataset = [1, 2, 3, 4]
minibatches = list(ppo._yield_minibatches(dataset, minibatch_size=2, num_epochs=3))
assert len(minibatches) == 6
samples = sum(minibatches, [])
assert len(samples) == 12
assert {1, 2, 3, 4} == set(samples[:4])
assert {1, 2, 3, 4} == set(samples[4:8])
assert {1, 2, 3, 4} == set(samples[8:12])
def test_yield_minibatches_indivisible():
dataset = [1, 2, 3]
minibatches = list(ppo._yield_minibatches(dataset, minibatch_size=2, num_epochs=3))
assert len(minibatches) == 5
samples = sum(minibatches, [])
assert len(samples) == 10
# samples[:6] is from the first two epochs
assert samples[:6].count(1) == 2
assert samples[:6].count(2) == 2
assert samples[:6].count(3) == 2
# samples[6:] is from the final epoch
assert 1 <= samples[6:].count(1) <= 2
assert 1 <= samples[6:].count(2) <= 2
assert 1 <= samples[6:].count(3) <= 2
def test_yield_minibatches_smaller_dataset():
# dataset smaller than minibatch
dataset = [1, 2]
minibatches = list(ppo._yield_minibatches(dataset, minibatch_size=4, num_epochs=3))
assert len(minibatches) == 2
samples = sum(minibatches, [])
assert len(samples) == 8
assert samples.count(1) == 4
assert samples.count(2) == 4
|
the-stack_0_10350 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import functools as ft
import itertools
import os
import re
import shutil
import sys
from llnl.util import tty
from llnl.util.compat import filter, map, zip
from llnl.util.filesystem import (
mkdirp,
remove_dead_links,
remove_empty_directories,
visit_directory_tree,
)
from llnl.util.lang import index_by, match_predicate
from llnl.util.link_tree import (
DestinationMergeVisitor,
LinkTree,
MergeConflictSummary,
SingleMergeConflictError,
SourceMergeVisitor,
)
from llnl.util.symlink import symlink
from llnl.util.tty.color import colorize
import spack.config
import spack.paths
import spack.projections
import spack.schema.projections
import spack.spec
import spack.store
import spack.util.spack_json as s_json
import spack.util.spack_yaml as s_yaml
from spack.directory_layout import (
ExtensionAlreadyInstalledError,
YamlViewExtensionsLayout,
)
from spack.error import SpackError
__all__ = ["FilesystemView", "YamlFilesystemView"]
_projections_path = '.spack/projections.yaml'
def view_symlink(src, dst, **kwargs):
# keyword arguments are irrelevant
# here to fit required call signature
symlink(src, dst)
def view_hardlink(src, dst, **kwargs):
# keyword arguments are irrelevant
# here to fit required call signature
os.link(src, dst)
def view_copy(src, dst, view, spec=None):
"""
Copy a file from src to dst.
Use spec and view to generate relocations
"""
shutil.copy2(src, dst)
if spec and not spec.external:
# Not metadata, we have to relocate it
# Get information on where to relocate from/to
# This is vestigial code for the *old* location of sbang. Previously,
# sbang was a bash script, and it lived in the spack prefix. It is
# now a POSIX script that lives in the install prefix. Old packages
# will have the old sbang location in their shebangs.
# TODO: Not sure which one to use...
import spack.hooks.sbang as sbang
# Break a package include cycle
import spack.relocate
orig_sbang = '#!/bin/bash {0}/bin/sbang'.format(spack.paths.spack_root)
new_sbang = sbang.sbang_shebang_line()
prefix_to_projection = collections.OrderedDict({
spec.prefix: view.get_projection_for_spec(spec)})
for dep in spec.traverse():
if not dep.external:
prefix_to_projection[dep.prefix] = \
view.get_projection_for_spec(dep)
if spack.relocate.is_binary(dst):
spack.relocate.relocate_text_bin(
binaries=[dst],
prefixes=prefix_to_projection
)
else:
prefix_to_projection[spack.store.layout.root] = view._root
prefix_to_projection[orig_sbang] = new_sbang
spack.relocate.relocate_text(
files=[dst],
prefixes=prefix_to_projection
)
try:
stat = os.stat(src)
os.chown(dst, stat.st_uid, stat.st_gid)
except OSError:
tty.debug('Can\'t change the permissions for %s' % dst)
def view_func_parser(parsed_name):
# What method are we using for this view
if parsed_name in ("hardlink", "hard"):
return view_hardlink
elif parsed_name in ("copy", "relocate"):
return view_copy
elif parsed_name in ("add", "symlink", "soft"):
return view_symlink
else:
raise ValueError("invalid link type for view: '%s'" % parsed_name)
def inverse_view_func_parser(view_type):
# get string based on view type
if view_type is view_hardlink:
link_name = 'hardlink'
elif view_type is view_copy:
link_name = 'copy'
else:
link_name = 'symlink'
return link_name
class FilesystemView(object):
"""
    Governs a filesystem view that is located at a certain root directory.
    Packages are linked from their install directories into a common file
    hierarchy.
    In distributed filesystems, loading each installed package separately
can lead to slow-downs due to too many directories being traversed.
This can be circumvented by loading all needed modules into a common
directory structure.
"""
def __init__(self, root, layout, **kwargs):
"""
Initialize a filesystem view under the given `root` directory with
corresponding directory `layout`.
Files are linked by method `link` (llnl.util.symlink by default).
"""
self._root = root
self.layout = layout
self.projections = kwargs.get('projections', {})
self.ignore_conflicts = kwargs.get("ignore_conflicts", False)
self.verbose = kwargs.get("verbose", False)
# Setup link function to include view
link_func = kwargs.get("link", view_symlink)
self.link = ft.partial(link_func, view=self)
def add_specs(self, *specs, **kwargs):
"""
Add given specs to view.
The supplied specs might be standalone packages or extensions of
other packages.
Should accept `with_dependencies` as keyword argument (default
        True) to indicate whether or not dependencies should be activated as
        well.
        Should accept an `exclude` keyword argument containing a list of
regexps that filter out matching spec names.
This method should make use of `activate_{extension,standalone}`.
"""
raise NotImplementedError
def add_extension(self, spec):
"""
Add (link) an extension in this view. Does not add dependencies.
"""
raise NotImplementedError
def add_standalone(self, spec):
"""
Add (link) a standalone package into this view.
"""
raise NotImplementedError
def check_added(self, spec):
"""
Check if the given concrete spec is active in this view.
"""
raise NotImplementedError
def remove_specs(self, *specs, **kwargs):
"""
Removes given specs from view.
The supplied spec might be a standalone package or an extension of
another package.
Should accept `with_dependencies` as keyword argument (default
        True) to indicate whether or not dependencies should be deactivated
        as well.
        Should accept `with_dependents` as keyword argument (default True)
        to indicate whether or not dependents on the deactivated specs
        should be removed as well.
        Should accept an `exclude` keyword argument containing a list of
regexps that filter out matching spec names.
This method should make use of `deactivate_{extension,standalone}`.
"""
raise NotImplementedError
def remove_extension(self, spec):
"""
Remove (unlink) an extension from this view.
"""
raise NotImplementedError
def remove_standalone(self, spec):
"""
Remove (unlink) a standalone package from this view.
"""
raise NotImplementedError
def get_projection_for_spec(self, spec):
"""
Get the projection in this view for a spec.
"""
raise NotImplementedError
def get_all_specs(self):
"""
Get all specs currently active in this view.
"""
raise NotImplementedError
def get_spec(self, spec):
"""
Return the actual spec linked in this view (i.e. do not look it up
in the database by name).
`spec` can be a name or a spec from which the name is extracted.
As there can only be a single version active for any spec the name
is enough to identify the spec in the view.
If no spec is present, returns None.
"""
raise NotImplementedError
def print_status(self, *specs, **kwargs):
"""
Print a short summary about the given specs, detailing whether..
* ..they are active in the view.
* ..they are active but the activated version differs.
        * ..they are not active in the view.
Takes `with_dependencies` keyword argument so that the status of
dependencies is printed as well.
"""
raise NotImplementedError
class YamlFilesystemView(FilesystemView):
"""
Filesystem view to work with a yaml based directory layout.
"""
def __init__(self, root, layout, **kwargs):
super(YamlFilesystemView, self).__init__(root, layout, **kwargs)
# Super class gets projections from the kwargs
# YAML specific to get projections from YAML file
self.projections_path = os.path.join(self._root, _projections_path)
if not self.projections:
# Read projections file from view
self.projections = self.read_projections()
elif not os.path.exists(self.projections_path):
# Write projections file to new view
self.write_projections()
else:
# Ensure projections are the same from each source
# Read projections file from view
if self.projections != self.read_projections():
msg = 'View at %s has projections file' % self._root
msg += ' which does not match projections passed manually.'
raise ConflictingProjectionsError(msg)
self.extensions_layout = YamlViewExtensionsLayout(self, layout)
self._croot = colorize_root(self._root) + " "
def write_projections(self):
if self.projections:
mkdirp(os.path.dirname(self.projections_path))
with open(self.projections_path, 'w') as f:
f.write(s_yaml.dump_config({'projections': self.projections}))
def read_projections(self):
if os.path.exists(self.projections_path):
with open(self.projections_path, 'r') as f:
projections_data = s_yaml.load(f)
spack.config.validate(projections_data,
spack.schema.projections.schema)
return projections_data['projections']
else:
return {}
def add_specs(self, *specs, **kwargs):
assert all((s.concrete for s in specs))
specs = set(specs)
if kwargs.get("with_dependencies", True):
specs.update(get_dependencies(specs))
if kwargs.get("exclude", None):
specs = set(filter_exclude(specs, kwargs["exclude"]))
conflicts = self.get_conflicts(*specs)
if conflicts:
for s, v in conflicts:
self.print_conflict(v, s)
return
extensions = set(filter(lambda s: s.package.is_extension, specs))
standalones = specs - extensions
set(map(self._check_no_ext_conflicts, extensions))
# fail on first error, otherwise link extensions as well
if all(map(self.add_standalone, standalones)):
all(map(self.add_extension, extensions))
def add_extension(self, spec):
if not spec.package.is_extension:
tty.error(self._croot + 'Package %s is not an extension.'
% spec.name)
return False
if spec.external:
tty.warn(self._croot + 'Skipping external package: %s'
% colorize_spec(spec))
return True
if not spec.package.is_activated(self):
spec.package.do_activate(
self, verbose=self.verbose, with_dependencies=False)
# make sure the meta folder is linked as well (this is not done by the
        # extension-activation mechanism)
if not self.check_added(spec):
self.link_meta_folder(spec)
return True
def add_standalone(self, spec):
if spec.package.is_extension:
tty.error(self._croot + 'Package %s is an extension.'
% spec.name)
return False
if spec.external:
tty.warn(self._croot + 'Skipping external package: %s'
% colorize_spec(spec))
return True
if self.check_added(spec):
tty.warn(self._croot + 'Skipping already linked package: %s'
% colorize_spec(spec))
return True
if spec.package.extendable:
# Check for globally activated extensions in the extendee that
# we're looking at.
activated = [p.spec for p in
spack.store.db.activated_extensions_for(spec)]
if activated:
tty.error("Globally activated extensions cannot be used in "
"conjunction with filesystem views. "
"Please deactivate the following specs: ")
spack.cmd.display_specs(activated, flags=True, variants=True,
long=False)
return False
self.merge(spec)
self.link_meta_folder(spec)
if self.verbose:
tty.info(self._croot + 'Linked package: %s' % colorize_spec(spec))
return True
def merge(self, spec, ignore=None):
pkg = spec.package
view_source = pkg.view_source()
view_dst = pkg.view_destination(self)
tree = LinkTree(view_source)
ignore = ignore or (lambda f: False)
ignore_file = match_predicate(
self.layout.hidden_file_regexes, ignore)
# check for dir conflicts
conflicts = tree.find_dir_conflicts(view_dst, ignore_file)
merge_map = tree.get_file_map(view_dst, ignore_file)
if not self.ignore_conflicts:
conflicts.extend(pkg.view_file_conflicts(self, merge_map))
if conflicts:
raise SingleMergeConflictError(conflicts[0])
# merge directories with the tree
tree.merge_directories(view_dst, ignore_file)
pkg.add_files_to_view(self, merge_map)
def unmerge(self, spec, ignore=None):
pkg = spec.package
view_source = pkg.view_source()
view_dst = pkg.view_destination(self)
tree = LinkTree(view_source)
ignore = ignore or (lambda f: False)
ignore_file = match_predicate(
self.layout.hidden_file_regexes, ignore)
merge_map = tree.get_file_map(view_dst, ignore_file)
pkg.remove_files_from_view(self, merge_map)
# now unmerge the directory tree
tree.unmerge_directories(view_dst, ignore_file)
def remove_files(self, files):
def needs_file(spec, file):
# convert the file we want to remove to a source in this spec
projection = self.get_projection_for_spec(spec)
relative_path = os.path.relpath(file, projection)
test_path = os.path.join(spec.prefix, relative_path)
# check if this spec owns a file of that name (through the
# manifest in the metadata dir, which we have in the view).
manifest_file = os.path.join(self.get_path_meta_folder(spec),
spack.store.layout.manifest_file_name)
try:
with open(manifest_file, 'r') as f:
manifest = s_json.load(f)
except (OSError, IOError):
# if we can't load it, assume it doesn't know about the file.
manifest = {}
return test_path in manifest
specs = self.get_all_specs()
for file in files:
if not os.path.lexists(file):
tty.warn("Tried to remove %s which does not exist" % file)
continue
# remove if file is not owned by any other package in the view
# This will only be false if two packages are merged into a prefix
# and have a conflicting file
# check all specs for whether they own the file. That include the spec
# we are currently removing, as we remove files before unlinking the
# metadata directory.
if len([s for s in specs if needs_file(s, file)]) <= 1:
tty.debug("Removing file " + file)
os.remove(file)
def check_added(self, spec):
assert spec.concrete
return spec == self.get_spec(spec)
def remove_specs(self, *specs, **kwargs):
assert all((s.concrete for s in specs))
with_dependents = kwargs.get("with_dependents", True)
with_dependencies = kwargs.get("with_dependencies", False)
# caller can pass this in, as get_all_specs() is expensive
all_specs = kwargs.get("all_specs", None) or set(self.get_all_specs())
specs = set(specs)
if with_dependencies:
specs = get_dependencies(specs)
if kwargs.get("exclude", None):
specs = set(filter_exclude(specs, kwargs["exclude"]))
to_deactivate = specs
to_keep = all_specs - to_deactivate
dependents = find_dependents(to_keep, to_deactivate)
if with_dependents:
# remove all packages depending on the ones to remove
if len(dependents) > 0:
tty.warn(self._croot +
"The following dependents will be removed: %s"
% ", ".join((s.name for s in dependents)))
to_deactivate.update(dependents)
elif len(dependents) > 0:
tty.warn(self._croot +
"The following packages will be unusable: %s"
% ", ".join((s.name for s in dependents)))
# Determine the order that packages should be removed from the view;
# dependents come before their dependencies.
to_deactivate_sorted = list()
depmap = dict()
for spec in to_deactivate:
depmap[spec] = set(d for d in spec.traverse(root=False)
if d in to_deactivate)
while depmap:
for spec in [s for s, d in depmap.items() if not d]:
to_deactivate_sorted.append(spec)
for s in depmap.keys():
depmap[s].discard(spec)
depmap.pop(spec)
to_deactivate_sorted.reverse()
# Ensure that the sorted list contains all the packages
assert set(to_deactivate_sorted) == to_deactivate
# Remove the packages from the view
for spec in to_deactivate_sorted:
if spec.package.is_extension:
self.remove_extension(spec, with_dependents=with_dependents)
else:
self.remove_standalone(spec)
self._purge_empty_directories()
def remove_extension(self, spec, with_dependents=True):
"""
Remove (unlink) an extension from this view.
"""
if not self.check_added(spec):
tty.warn(self._croot +
'Skipping package not linked in view: %s' % spec.name)
return
if spec.package.is_activated(self):
spec.package.do_deactivate(
self,
verbose=self.verbose,
remove_dependents=with_dependents)
self.unlink_meta_folder(spec)
def remove_standalone(self, spec):
"""
Remove (unlink) a standalone package from this view.
"""
if not self.check_added(spec):
tty.warn(self._croot +
'Skipping package not linked in view: %s' % spec.name)
return
self.unmerge(spec)
self.unlink_meta_folder(spec)
if self.verbose:
tty.info(self._croot + 'Removed package: %s' % colorize_spec(spec))
def get_projection_for_spec(self, spec):
"""
Return the projection for a spec in this view.
Relies on the ordering of projections to avoid ambiguity.
"""
spec = spack.spec.Spec(spec)
# Extensions are placed by their extendee, not by their own spec
locator_spec = spec
if spec.package.extendee_spec:
locator_spec = spec.package.extendee_spec
proj = spack.projections.get_projection(self.projections, locator_spec)
if proj:
return os.path.join(self._root, locator_spec.format(proj))
return self._root
def get_all_specs(self):
md_dirs = []
for root, dirs, files in os.walk(self._root):
if spack.store.layout.metadata_dir in dirs:
md_dirs.append(os.path.join(root,
spack.store.layout.metadata_dir))
specs = []
for md_dir in md_dirs:
if os.path.exists(md_dir):
for name_dir in os.listdir(md_dir):
filename = os.path.join(md_dir, name_dir,
spack.store.layout.spec_file_name)
spec = get_spec_from_file(filename)
if spec:
specs.append(spec)
return specs
def get_conflicts(self, *specs):
"""
Return list of tuples (<spec>, <spec in view>) where the spec
active in the view differs from the one to be activated.
"""
in_view = map(self.get_spec, specs)
return [(s, v) for s, v in zip(specs, in_view)
if v is not None and s != v]
def get_path_meta_folder(self, spec):
"Get path to meta folder for either spec or spec name."
return os.path.join(self.get_projection_for_spec(spec),
spack.store.layout.metadata_dir,
getattr(spec, "name", spec))
def get_spec(self, spec):
dotspack = self.get_path_meta_folder(spec)
filename = os.path.join(dotspack,
spack.store.layout.spec_file_name)
return get_spec_from_file(filename)
def link_meta_folder(self, spec):
src = spack.store.layout.metadata_path(spec)
tgt = self.get_path_meta_folder(spec)
tree = LinkTree(src)
# there should be no conflicts when linking the meta folder
tree.merge(tgt, link=self.link)
def print_conflict(self, spec_active, spec_specified, level="error"):
"Singular print function for spec conflicts."
cprint = getattr(tty, level)
color = sys.stdout.isatty()
linked = tty.color.colorize(" (@gLinked@.)", color=color)
specified = tty.color.colorize("(@rSpecified@.)", color=color)
cprint(self._croot + "Package conflict detected:\n"
"%s %s\n" % (linked, colorize_spec(spec_active)) +
"%s %s" % (specified, colorize_spec(spec_specified)))
def print_status(self, *specs, **kwargs):
if kwargs.get("with_dependencies", False):
specs = set(get_dependencies(specs))
specs = sorted(specs, key=lambda s: s.name)
in_view = list(map(self.get_spec, specs))
for s, v in zip(specs, in_view):
if not v:
tty.error(self._croot +
'Package not linked: %s' % s.name)
elif s != v:
self.print_conflict(v, s, level="warn")
in_view = list(filter(None, in_view))
if len(specs) > 0:
tty.msg("Packages linked in %s:" % self._croot[:-1])
# Make a dict with specs keyed by architecture and compiler.
index = index_by(specs, ('architecture', 'compiler'))
# Traverse the index and print out each package
for i, (architecture, compiler) in enumerate(sorted(index)):
if i > 0:
print()
header = "%s{%s} / %s{%s}" % (spack.spec.architecture_color,
architecture,
spack.spec.compiler_color,
compiler)
tty.hline(colorize(header), char='-')
specs = index[(architecture, compiler)]
specs.sort()
format_string = '{name}{@version}'
format_string += '{%compiler}{compiler_flags}{variants}'
abbreviated = [s.cformat(format_string) for s in specs]
# Print one spec per line along with prefix path
width = max(len(s) for s in abbreviated)
width += 2
format = " %%-%ds%%s" % width
for abbrv, s in zip(abbreviated, specs):
prefix = ''
if self.verbose:
prefix = colorize('@K{%s}' % s.dag_hash(7))
print(
prefix + (format % (abbrv,
self.get_projection_for_spec(s)))
)
else:
tty.warn(self._croot + "No packages found.")
def _purge_empty_directories(self):
remove_empty_directories(self._root)
def _purge_broken_links(self):
remove_dead_links(self._root)
def clean(self):
self._purge_broken_links()
self._purge_empty_directories()
def unlink_meta_folder(self, spec):
path = self.get_path_meta_folder(spec)
assert os.path.exists(path)
shutil.rmtree(path)
def _check_no_ext_conflicts(self, spec):
"""
Check that there is no extension conflict for specs.
"""
extendee = spec.package.extendee_spec
try:
self.extensions_layout.check_extension_conflict(extendee, spec)
except ExtensionAlreadyInstalledError:
# we print the warning here because later on the order in which
# packages get activated is not clear (set-sorting)
tty.warn(self._croot +
'Skipping already activated package: %s' % spec.name)
class SimpleFilesystemView(FilesystemView):
"""A simple and partial implementation of FilesystemView focused on
performance and immutable views, where specs cannot be removed after they
were added."""
def __init__(self, root, layout, **kwargs):
super(SimpleFilesystemView, self).__init__(root, layout, **kwargs)
def add_specs(self, *specs, **kwargs):
assert all((s.concrete for s in specs))
if len(specs) == 0:
return
# Drop externals
for s in specs:
if s.external:
tty.warn('Skipping external package: ' + s.short_spec)
specs = [s for s in specs if not s.external]
if kwargs.get("exclude", None):
specs = set(filter_exclude(specs, kwargs["exclude"]))
# Ignore spack meta data folder.
def skip_list(file):
return os.path.basename(file) == spack.store.layout.metadata_dir
visitor = SourceMergeVisitor(ignore=skip_list)
# Gather all the directories to be made and files to be linked
for spec in specs:
src_prefix = spec.package.view_source()
visitor.set_projection(self.get_relative_projection_for_spec(spec))
visit_directory_tree(src_prefix, visitor)
# Check for conflicts in destination dir.
visit_directory_tree(self._root, DestinationMergeVisitor(visitor))
# Throw on fatal dir-file conflicts.
if visitor.fatal_conflicts:
raise MergeConflictSummary(visitor.fatal_conflicts)
# Inform about file-file conflicts.
if visitor.file_conflicts:
if self.ignore_conflicts:
tty.debug("{0} file conflicts".format(len(visitor.file_conflicts)))
else:
raise MergeConflictSummary(visitor.file_conflicts)
tty.debug("Creating {0} dirs and {1} links".format(
len(visitor.directories),
len(visitor.files)))
# Make the directory structure
for dst in visitor.directories:
os.mkdir(os.path.join(self._root, dst))
# Then group the files to be linked by spec...
# For compatibility, we have to create a merge_map dict mapping
# full_src => full_dst
files_per_spec = itertools.groupby(
visitor.files.items(), key=lambda item: item[1][0])
for (spec, (src_root, rel_paths)) in zip(specs, files_per_spec):
merge_map = dict()
for dst_rel, (_, src_rel) in rel_paths:
full_src = os.path.join(src_root, src_rel)
full_dst = os.path.join(self._root, dst_rel)
merge_map[full_src] = full_dst
spec.package.add_files_to_view(self, merge_map, skip_if_exists=False)
# Finally create the metadata dirs.
self.link_metadata(specs)
def link_metadata(self, specs):
metadata_visitor = SourceMergeVisitor()
for spec in specs:
src_prefix = os.path.join(
spec.package.view_source(),
spack.store.layout.metadata_dir)
proj = os.path.join(
self.get_relative_projection_for_spec(spec),
spack.store.layout.metadata_dir,
spec.name)
metadata_visitor.set_projection(proj)
visit_directory_tree(src_prefix, metadata_visitor)
# Check for conflicts in destination dir.
visit_directory_tree(self._root, DestinationMergeVisitor(metadata_visitor))
# Throw on dir-file conflicts -- unlikely, but who knows.
if metadata_visitor.fatal_conflicts:
raise MergeConflictSummary(metadata_visitor.fatal_conflicts)
# We are strict here for historical reasons
if metadata_visitor.file_conflicts:
raise MergeConflictSummary(metadata_visitor.file_conflicts)
for dst in metadata_visitor.directories:
os.mkdir(os.path.join(self._root, dst))
for dst_relpath, (src_root, src_relpath) in metadata_visitor.files.items():
self.link(os.path.join(src_root, src_relpath),
os.path.join(self._root, dst_relpath))
def get_relative_projection_for_spec(self, spec):
# Extensions are placed by their extendee, not by their own spec
if spec.package.extendee_spec:
spec = spec.package.extendee_spec
p = spack.projections.get_projection(self.projections, spec)
return spec.format(p) if p else ''
def get_projection_for_spec(self, spec):
"""
Return the projection for a spec in this view.
Relies on the ordering of projections to avoid ambiguity.
"""
spec = spack.spec.Spec(spec)
# Extensions are placed by their extendee, not by their own spec
locator_spec = spec
if spec.package.extendee_spec:
locator_spec = spec.package.extendee_spec
proj = spack.projections.get_projection(self.projections, locator_spec)
if proj:
return os.path.join(self._root, locator_spec.format(proj))
return self._root
#####################
# utility functions #
#####################
def get_spec_from_file(filename):
try:
with open(filename, "r") as f:
return spack.spec.Spec.from_yaml(f)
except IOError:
return None
def colorize_root(root):
colorize = ft.partial(tty.color.colorize, color=sys.stdout.isatty())
pre, post = map(colorize, "@M[@. @M]@.".split())
return "".join([pre, root, post])
def colorize_spec(spec):
"Colorize spec output if in TTY."
if sys.stdout.isatty():
return spec.cshort_spec
else:
return spec.short_spec
def find_dependents(all_specs, providers, deptype='run'):
"""
Return a set containing all those specs from all_specs that depend on
providers at the given dependency type.
"""
dependents = set()
for s in all_specs:
for dep in s.traverse(deptype=deptype):
if dep in providers:
dependents.add(s)
return dependents
def filter_exclude(specs, exclude):
"Filter specs given sequence of exclude regex"
to_exclude = [re.compile(e) for e in exclude]
def keep(spec):
for e in to_exclude:
if e.match(spec.name):
return False
return True
return filter(keep, specs)
def get_dependencies(specs):
"Get set of dependencies (includes specs)"
retval = set()
set(map(retval.update, (set(s.traverse()) for s in specs)))
return retval
class ConflictingProjectionsError(SpackError):
"""Raised when a view has a projections file and is given one manually."""
|
the-stack_0_10351 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.enums",
marshal="google.ads.googleads.v4",
manifest={"AssetTypeEnum",},
)
class AssetTypeEnum(proto.Message):
r"""Container for enum describing the types of asset."""
class AssetType(proto.Enum):
r"""Enum describing possible types of asset."""
UNSPECIFIED = 0
UNKNOWN = 1
YOUTUBE_VIDEO = 2
MEDIA_BUNDLE = 3
IMAGE = 4
TEXT = 5
BOOK_ON_GOOGLE = 7
__all__ = tuple(sorted(__protobuf__.manifest))
|
the-stack_0_10352 | from Effects.Effect import Effect
from PIL import ImageDraw
class Negative(Effect):
    def Iteration(self):
        # Invert each RGB channel of every pixel to produce the negative image.
        for i in range(self.width):
            for j in range(self.height):
                r = self.pix[i, j][0]
                g = self.pix[i, j][1]
                b = self.pix[i, j][2]
                self.draw.point((i, j), (255 - r, 255 - g, 255 - b))
|
the-stack_0_10353 | """Provide the MessageableMixin class."""
from ....const import API_PATH
class MessageableMixin:
"""Interface for classes that can be messaged."""
def message(self, subject, message, from_subreddit=None):
"""
Send a message to a redditor or a subreddit's moderators (mod mail).
:param subject: The subject of the message.
:param message: The message content.
:param from_subreddit: A :class:`~.Subreddit` instance or string to
send the message from. When provided, messages are sent from
the subreddit rather than from the authenticated user.
Note that the authenticated user must be a moderator of the
subreddit and have the ``mail`` moderator permission.
For example, to send a private message to ``u/spez``, try:
.. code:: python
reddit.redditor('spez').message('TEST', 'test message from PRAW')
To send a message to ``u/spez`` from the moderators of ``r/test`` try:
.. code:: python
reddit.redditor('spez').message('TEST', 'test message from r/test',
from_subreddit='test')
To send a message to the moderators of ``r/test``, try:
.. code:: python
reddit.subreddit('test').message('TEST', 'test PM from PRAW')
"""
data = {
"subject": subject,
"text": message,
"to": "{}{}".format(
getattr(self.__class__, "MESSAGE_PREFIX", ""), self
),
}
if from_subreddit:
data["from_sr"] = str(from_subreddit)
self._reddit.post(API_PATH["compose"], data=data)
|
the-stack_0_10357 | import c4d
from RedshiftWrapper.Redshift import Redshift
def main():
rs = Redshift()
if rs is False: return
#Assign Material
rs.SetMat(doc.GetFirstMaterial())
#Get all node and assign color
listNode = rs.GetAllNodes()
for node in listNode:
node.SetColor()
c4d.EventAdd()
if __name__=='__main__':
main() |
the-stack_0_10358 | """
Module for managing operation and controller modes.
Operation modes can be 'auto', 'comfort', 'standby', 'economy', 'protection' and use either a binary DPT or DPT 20.102.
Controller modes use DPT 20.105.
"""
from itertools import chain
from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Union
from xknx.dpt.dpt_hvac_mode import HVACControllerMode, HVACOperationMode
from xknx.exceptions import DeviceIllegalValue
from xknx.remote_value.remote_value_climate_mode import (
RemoteValueBinaryHeatCool,
RemoteValueBinaryOperationMode,
RemoteValueClimateMode,
RemoteValueClimateModeBase,
)
from .device import Device, DeviceCallbackType
if TYPE_CHECKING:
from xknx.remote_value import RemoteValue
from xknx.telegram import Telegram
from xknx.telegram.address import GroupAddressableType
from xknx.xknx import XKNX
class ClimateMode(Device):
"""Class for managing the climate mode."""
# pylint: disable=invalid-name,too-many-instance-attributes
def __init__(
self,
xknx: "XKNX",
name: str,
group_address_operation_mode: Optional["GroupAddressableType"] = None,
group_address_operation_mode_state: Optional["GroupAddressableType"] = None,
group_address_operation_mode_protection: Optional[
"GroupAddressableType"
] = None,
group_address_operation_mode_night: Optional["GroupAddressableType"] = None,
group_address_operation_mode_comfort: Optional["GroupAddressableType"] = None,
group_address_operation_mode_standby: Optional["GroupAddressableType"] = None,
group_address_controller_status: Optional["GroupAddressableType"] = None,
group_address_controller_status_state: Optional["GroupAddressableType"] = None,
group_address_controller_mode: Optional["GroupAddressableType"] = None,
group_address_controller_mode_state: Optional["GroupAddressableType"] = None,
group_address_heat_cool: Optional["GroupAddressableType"] = None,
group_address_heat_cool_state: Optional["GroupAddressableType"] = None,
operation_modes: Optional[List[Union[str, HVACOperationMode]]] = None,
controller_modes: Optional[List[Union[str, HVACControllerMode]]] = None,
device_updated_cb: Optional[DeviceCallbackType] = None,
):
"""Initialize ClimateMode class."""
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches, too-many-statements
super().__init__(xknx, name, device_updated_cb)
self.remote_value_operation_mode: RemoteValueClimateMode[
HVACOperationMode
] = RemoteValueClimateMode(
xknx,
group_address=group_address_operation_mode,
group_address_state=group_address_operation_mode_state,
sync_state=True,
device_name=name,
feature_name="Operation mode",
climate_mode_type=RemoteValueClimateMode.ClimateModeType.HVAC_MODE,
after_update_cb=None,
)
self.remote_value_controller_mode: RemoteValueClimateMode[
HVACControllerMode
] = RemoteValueClimateMode(
xknx,
group_address=group_address_controller_mode,
group_address_state=group_address_controller_mode_state,
sync_state=True,
device_name=name,
feature_name="Controller mode",
climate_mode_type=RemoteValueClimateMode.ClimateModeType.HVAC_CONTR_MODE,
after_update_cb=None,
)
self.remote_value_controller_status: RemoteValueClimateMode[
HVACOperationMode
] = RemoteValueClimateMode(
xknx,
group_address=group_address_controller_status,
group_address_state=group_address_controller_status_state,
sync_state=True,
device_name=name,
feature_name="Controller status",
climate_mode_type=RemoteValueClimateMode.ClimateModeType.CONTROLLER_STATUS,
after_update_cb=None,
)
self.remote_value_operation_mode_comfort = RemoteValueBinaryOperationMode(
xknx,
group_address=group_address_operation_mode_comfort,
group_address_state=group_address_operation_mode_comfort,
sync_state=True,
device_name=name,
feature_name="Operation mode Comfort",
operation_mode=HVACOperationMode.COMFORT,
after_update_cb=None,
)
self.remote_value_operation_mode_standby = RemoteValueBinaryOperationMode(
xknx,
group_address=group_address_operation_mode_standby,
group_address_state=group_address_operation_mode_standby,
sync_state=True,
device_name=name,
feature_name="Operation mode Standby",
operation_mode=HVACOperationMode.STANDBY,
after_update_cb=None,
)
self.remote_value_operation_mode_night = RemoteValueBinaryOperationMode(
xknx,
group_address=group_address_operation_mode_night,
group_address_state=group_address_operation_mode_night,
sync_state=True,
device_name=name,
feature_name="Operation mode Night",
operation_mode=HVACOperationMode.NIGHT,
after_update_cb=None,
)
self.remote_value_operation_mode_protection = RemoteValueBinaryOperationMode(
xknx,
group_address=group_address_operation_mode_protection,
group_address_state=group_address_operation_mode_protection,
sync_state=True,
device_name=name,
feature_name="Operation mode Protection",
operation_mode=HVACOperationMode.FROST_PROTECTION,
after_update_cb=None,
)
self.remote_value_heat_cool = RemoteValueBinaryHeatCool(
xknx,
group_address=group_address_heat_cool,
group_address_state=group_address_heat_cool_state,
sync_state=True,
device_name=name,
feature_name="Heat/Cool",
controller_mode=HVACControllerMode.HEAT,
after_update_cb=None,
)
self.operation_mode = HVACOperationMode.STANDBY
self.controller_mode = HVACControllerMode.HEAT
self._operation_modes: List[HVACOperationMode] = []
if operation_modes is None:
self._operation_modes = self.gather_operation_modes()
else:
for op_mode in operation_modes:
if isinstance(op_mode, str):
self._operation_modes.append(HVACOperationMode(op_mode))
elif isinstance(op_mode, HVACOperationMode):
self._operation_modes.append(op_mode)
self._controller_modes: List[HVACControllerMode] = []
if controller_modes is None:
self._controller_modes = self.gather_controller_modes()
else:
for ct_mode in controller_modes:
if isinstance(ct_mode, str):
self._controller_modes.append(HVACControllerMode(ct_mode))
elif isinstance(ct_mode, HVACControllerMode):
self._controller_modes.append(ct_mode)
self.supports_operation_mode = any(
operation_mode.initialized
for operation_mode in self._iter_byte_operation_modes()
) or any(
operation_mode.initialized
for operation_mode in self._iter_binary_operation_modes()
)
self.supports_controller_mode = any(
operation_mode.initialized
for operation_mode in self._iter_controller_remote_values()
)
self._use_binary_operation_modes = any(
operation_mode.initialized
for operation_mode in self._iter_binary_operation_modes()
)
@classmethod
def from_config(cls, xknx: "XKNX", name: str, config: Any) -> "ClimateMode":
"""Initialize object from configuration structure."""
# pylint: disable=too-many-locals
group_address_operation_mode = config.get("group_address_operation_mode")
group_address_operation_mode_state = config.get(
"group_address_operation_mode_state"
)
group_address_operation_mode_protection = config.get(
"group_address_operation_mode_protection"
)
group_address_operation_mode_night = config.get(
"group_address_operation_mode_night"
)
group_address_operation_mode_comfort = config.get(
"group_address_operation_mode_comfort"
)
group_address_operation_mode_standby = config.get(
"group_address_operation_mode_standby"
)
group_address_controller_status = config.get("group_address_controller_status")
group_address_controller_status_state = config.get(
"group_address_controller_status_state"
)
group_address_controller_mode = config.get("group_address_controller_mode")
group_address_controller_mode_state = config.get(
"group_address_controller_mode_state"
)
group_address_heat_cool = config.get("group_address_heat_cool")
group_address_heat_cool_state = config.get("group_address_heat_cool_state")
return cls(
xknx,
name,
group_address_operation_mode=group_address_operation_mode,
group_address_operation_mode_state=group_address_operation_mode_state,
group_address_operation_mode_protection=group_address_operation_mode_protection,
group_address_operation_mode_night=group_address_operation_mode_night,
group_address_operation_mode_comfort=group_address_operation_mode_comfort,
group_address_operation_mode_standby=group_address_operation_mode_standby,
group_address_controller_status=group_address_controller_status,
group_address_controller_status_state=group_address_controller_status_state,
group_address_controller_mode=group_address_controller_mode,
group_address_controller_mode_state=group_address_controller_mode_state,
group_address_heat_cool=group_address_heat_cool,
group_address_heat_cool_state=group_address_heat_cool_state,
)
def _iter_remote_values(
self,
) -> Iterator["RemoteValue"]:
"""Iterate climate mode RemoteValue classes."""
return chain(
self._iter_byte_operation_modes(),
self._iter_controller_remote_values(),
self._iter_binary_operation_modes(),
)
def _iter_byte_operation_modes(
self,
) -> Iterator[RemoteValueClimateMode[HVACOperationMode]]:
"""Iterate normal DPT 20.102 operation mode remote values."""
yield from (
self.remote_value_operation_mode,
self.remote_value_controller_status,
)
def _iter_controller_remote_values(
self,
) -> Iterator[RemoteValueClimateModeBase[HVACControllerMode]]:
"""Iterate DPT 20.105 controller remote values."""
yield from (
self.remote_value_controller_mode,
self.remote_value_heat_cool,
)
def _iter_binary_operation_modes(self) -> Iterator[RemoteValueBinaryOperationMode]:
"""Iterate DPT 1 binary operation modes."""
yield from (
self.remote_value_operation_mode_comfort,
self.remote_value_operation_mode_night,
self.remote_value_operation_mode_protection,
self.remote_value_operation_mode_standby,
)
async def _set_internal_operation_mode(
self, operation_mode: HVACOperationMode
) -> None:
"""Set internal value of operation mode. Call hooks if operation mode was changed."""
if operation_mode != self.operation_mode:
self.operation_mode = operation_mode
await self.after_update()
async def _set_internal_controller_mode(
self, controller_mode: HVACControllerMode
) -> None:
"""Set internal value of controller mode. Call hooks if controller mode was changed."""
if controller_mode != self.controller_mode:
self.controller_mode = controller_mode
await self.after_update()
async def set_operation_mode(self, operation_mode: HVACOperationMode) -> None:
"""Set the operation mode of a thermostat. Send new operation_mode to BUS and update internal state."""
if (
not self.supports_operation_mode
or operation_mode not in self._operation_modes
):
raise DeviceIllegalValue(
"operation (preset) mode not supported", str(operation_mode)
)
rv: RemoteValueClimateModeBase[HVACOperationMode]
for rv in chain(
self._iter_byte_operation_modes(), self._iter_binary_operation_modes()
):
if rv.writable and operation_mode in rv.supported_operation_modes():
await rv.set(operation_mode)
await self._set_internal_operation_mode(operation_mode)
async def set_controller_mode(self, controller_mode: HVACControllerMode) -> None:
"""Set the controller mode of a thermostat. Send new controller mode to the bus and update internal state."""
if (
not self.supports_controller_mode
or controller_mode not in self._controller_modes
):
raise DeviceIllegalValue(
"controller (HVAC) mode not supported", str(controller_mode)
)
rv: RemoteValueClimateModeBase[HVACControllerMode]
for rv in self._iter_controller_remote_values():
if rv.writable and controller_mode in rv.supported_operation_modes():
await rv.set(controller_mode)
await self._set_internal_controller_mode(controller_mode)
@property
def operation_modes(self) -> List[HVACOperationMode]:
"""Return all configured operation modes."""
if not self.supports_operation_mode:
return []
return self._operation_modes
@property
def controller_modes(self) -> List[HVACControllerMode]:
"""Return all configured controller modes."""
if not self.supports_controller_mode:
return []
return self._controller_modes
def gather_operation_modes(self) -> List[HVACOperationMode]:
"""Gather operation modes from RemoteValues."""
operation_modes: List[HVACOperationMode] = []
for rv in chain(
self._iter_binary_operation_modes(), self._iter_byte_operation_modes()
):
if rv.writable:
operation_modes.extend(rv.supported_operation_modes())
# remove duplicates
return list(set(operation_modes))
def gather_controller_modes(self) -> List[HVACControllerMode]:
"""Gather controller modes from RemoteValues."""
controller_modes: List[HVACControllerMode] = []
for rv in self._iter_controller_remote_values():
if rv.writable:
controller_modes.extend(rv.supported_operation_modes())
# remove duplicates
return list(set(controller_modes))
async def process_group_write(self, telegram: "Telegram") -> None:
"""Process incoming and outgoing GROUP WRITE telegram."""
if self.supports_operation_mode:
for rv in self._iter_remote_values():
if await rv.process(telegram):
# ignore inactive RemoteValueBinaryOperationMode
if rv.value:
await self._set_internal_operation_mode(rv.value)
return
if self.supports_controller_mode:
for rv in self._iter_controller_remote_values():
if await rv.process(telegram):
await self._set_internal_controller_mode(rv.value)
return
def __str__(self) -> str:
"""Return object as readable string."""
return (
'<ClimateMode name="{}" '
'operation_mode="{}" '
'controller_mode="{}" '
'controller_status="{}" '
"/>".format(
self.name,
self.remote_value_operation_mode.group_addr_str(),
self.remote_value_controller_mode.group_addr_str(),
self.remote_value_controller_status.group_addr_str(),
)
)
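# Minimal usage sketch (illustrative only; not used by xknx itself): the group
# addresses and device name below are hypothetical, and a connected XKNX
# instance is assumed because the mode setters are coroutines.
async def _example_climate_mode_usage(xknx: "XKNX") -> None:
    """Create a ClimateMode and drive its operation and controller modes."""
    climate_mode = ClimateMode(
        xknx,
        name="Living room mode",
        group_address_operation_mode="1/2/3",
        group_address_controller_mode="1/2/4",
    )
    # Both calls send telegrams to the bus and update the internal state.
    await climate_mode.set_operation_mode(HVACOperationMode.COMFORT)
    await climate_mode.set_controller_mode(HVACControllerMode.HEAT)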
|
the-stack_0_10362 | """API to persist and read upload data metrics"""
import os
import sqlite3
DATABASE_FILENAME = os.path.expanduser('~/pypic.db')
def create_upload_table():
"""Create the necessary database objects for upload monitoring and
persistent data regarding all things video uploads
"""
db_connection = sqlite3.connect(DATABASE_FILENAME)
cursor = db_connection.cursor()
if not len(
cursor.execute(
'select * from sqlite_master where name = ?',
('uploads',)
).fetchall()):
cursor.execute(
'''create table uploads (
date_created text,
file_name text,
uploaded integer,
other_info text
)'''
)
def insert_upload_data(file_name, date_created, is_uploaded, other_info):
"""Insert the necessary data to reflect whether or not a video was
uploaded
"""
db_connection = sqlite3.connect(DATABASE_FILENAME)
cursor = db_connection.cursor()
cursor.execute(
'insert into uploads values (?, ?, ?, ?)',
(str(date_created), file_name, int(is_uploaded), other_info)
)
db_connection.commit()
db_connection.close()
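# Illustrative usage sketch (not part of the original module); the file name,
# timestamp and info string below are made-up example values.
if __name__ == '__main__':
    create_upload_table()
    insert_upload_data(
        file_name='example_video.mp4',
        date_created='2022-01-01 12:00:00',
        is_uploaded=True,
        other_info='uploaded on first attempt'
    )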
|
the-stack_0_10363 | from gcn.inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1./keep_prob)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
class Layer(object):
"""Base layer class. Defines basic API for all layer objects.
Implementation inspired by keras (http://keras.io).
# Properties
name: String, defines the variable scope of the layer.
logging: Boolean, switches Tensorflow histogram logging on/off
# Methods
_call(inputs): Defines computation graph of layer
(i.e. takes input, returns output)
__call__(inputs): Wrapper for _call()
_log_vars(): Log all variables
"""
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
logging = kwargs.get('logging', False)
self.logging = logging
self.sparse_inputs = False
def _call(self, inputs):
return inputs
def __call__(self, inputs):
with tf.name_scope(self.name):
if self.logging and not self.sparse_inputs:
tf.summary.histogram(self.name + '/inputs', inputs)
outputs = self._call(inputs)
if self.logging:
tf.summary.histogram(self.name + '/outputs', outputs)
return outputs
def _log_vars(self):
for var in self.vars:
tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
class Dense(Layer):
"""Dense layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,
act=tf.nn.relu, bias=False, featureless=False, **kwargs):
super(Dense, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = glorot([input_dim, output_dim],
name='weights')
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# transform
output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
class GraphConvolution(Layer):
"""Graph convolution layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=0.,
sparse_inputs=False, act=tf.nn.relu, bias=False,
featureless=False, **kwargs):
super(GraphConvolution, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
#self.degree_mat = degree_mat
self.act = act
self.support = placeholders['support']
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
#self.ob = placeholders['observation']
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
#with tf.variable_scope('_vars'):
#for i in range(len(self.support)):
for i in range(1):
self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],name='weights_' + str(i))
tf.add_to_collection('weight', self.vars['weights_' + str(i)])
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
with tf.variable_scope(self.name +'_adj_vars'):
self.vars['adj'] = tf.get_variable(name='adj',shape=self.support.shape,
initializer=tf.constant_initializer(self.support),
trainable=False)
tf.add_to_collection('adj', self.vars['adj'])
# with tf.variable_scope(self.name +'_identity_vars'):
# self.vars['identity'] = tf.get_variable(name='identity',shape=self.identity.shape,
# initializer=tf.constant_initializer(self.identity),
# trainable=False)
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# convolve
supports = list()
#for i in range(len(self.support)):
for i in range(1):
if not self.featureless:
pre_sup = dot(x, self.vars['weights_' + str(i)],
sparse=self.sparse_inputs)
else:
pre_sup = self.vars['weights_' + str(i)]
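            # symmetrically normalise the stored adjacency matrix,
            # i.e. D^-1/2 * A * D^-1/2 built from the inverse-sqrt row degrees
            # (assumes the adjacency is symmetric).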
degree_inverted = tf.diag(tf.rsqrt(tf.reduce_sum(self.vars['adj'], 1)))
normalized_adj = tf.matmul(self.vars['adj'], degree_inverted)
normalized_adj = tf.transpose(normalized_adj)
normalized_adj = tf.matmul(normalized_adj, degree_inverted)
support = dot(normalized_adj, pre_sup, sparse=False)
supports.append(support)
output = tf.add_n(supports)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
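# Illustrative usage sketch, kept as comments because it is not part of the
# original module and the placeholder/feature tensors are hypothetical:
# placeholders = {'support': adj, 'dropout': 0.5, 'num_features_nonzero': nnz}
# gc = GraphConvolution(input_dim=1433, output_dim=16, placeholders=placeholders,
#                       dropout=True, sparse_inputs=True, act=tf.nn.relu)
# hidden = gc(features)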
|
the-stack_0_10364 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
import sys
sys.path.extend([os.path.dirname(os.path.abspath(__file__))])
import cv2
import time
import numpy as np
import tensorflow as tf
import utils
from OneEuroFilter import OneEuroFilter
class VNectEstimator:
# the side length of the CNN input box
box_size = 368
# the input box size is 8 times the side length of the output heatmaps
hm_factor = 8
# sum of the joints to be detected
joints_sum = 21
# parent joint indexes of each joint (for plotting the skeletal lines)
joint_parents = [16, 15, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 14, 14, 1, 4, 7, 10, 13]
def __init__(self):
print('Initializing VNect Estimator...')
# the scale factors to zoom down the input image crops
# put different scales to get better average performance
# for faster loops, use less scales e.g. [1], [1, 0.7]
self.scales = [1, 0.85, 0.7]
# initializing one euro filters for all the joints
filter_config_2d = {
'freq': 30, # system frequency about 30 Hz
'mincutoff': 1.7, # value refer to the paper
'beta': 0.3, # value refer to the paper
'dcutoff': 0.4 # not mentioned, empirically set
}
filter_config_3d = {
'freq': 30, # system frequency about 30 Hz
'mincutoff': 0.8, # value refer to the paper
'beta': 0.4, # value refer to the paper
'dcutoff': 0.4 # not mentioned, empirically set
}
self.filter_2d = [(OneEuroFilter(**filter_config_2d),
OneEuroFilter(**filter_config_2d))
for _ in range(self.joints_sum)]
self.filter_3d = [(OneEuroFilter(**filter_config_3d),
OneEuroFilter(**filter_config_3d),
OneEuroFilter(**filter_config_3d))
for _ in range(self.joints_sum)]
# load pretrained VNect model
self.sess = tf.Session()
if os.getcwd().endswith('src'):
saver = tf.train.import_meta_graph('../models/tf_model/vnect_tf.meta')
saver.restore(self.sess, tf.train.latest_checkpoint('../models/tf_model/'))
else:
saver = tf.train.import_meta_graph('./models/tf_model/vnect_tf.meta')
saver.restore(self.sess, tf.train.latest_checkpoint('./models/tf_model/'))
graph = tf.get_default_graph()
self.input_crops = graph.get_tensor_by_name('Placeholder:0')
self.heatmap = graph.get_tensor_by_name('split_2:0')
self.x_heatmap = graph.get_tensor_by_name('split_2:1')
self.y_heatmap = graph.get_tensor_by_name('split_2:2')
self.z_heatmap = graph.get_tensor_by_name('split_2:3')
print('VNect Estimator initialized.')
@staticmethod
def gen_input_batch(img_input, box_size, scales):
        # input image --> squared image acceptable for the model
img_square, scaler, [offset_x, offset_y] = utils.img_scale_squarify(img_input, box_size)
# generate multi-scale image batch
input_batch = []
for scale in scales:
img = utils.img_scale_padding(img_square, scale, box_size) if scale < 1 else img_square
input_batch.append(img)
# image value range: [0, 255) --> [-0.4, 0.6)
input_batch = np.asarray(input_batch, dtype=np.float32) / 255 - 0.4
return input_batch, scaler, [offset_x, offset_y]
def joint_filter(self, joints, dim=2):
t = time.time()
if dim == 2:
for i in range(self.joints_sum):
joints[i, 0] = self.filter_2d[i][0](joints[i, 0], t)
joints[i, 1] = self.filter_2d[i][1](joints[i, 1], t)
else:
for i in range(self.joints_sum):
joints[i, 0] = self.filter_3d[i][0](joints[i, 0], t)
joints[i, 1] = self.filter_3d[i][1](joints[i, 1], t)
joints[i, 2] = self.filter_3d[i][2](joints[i, 2], t)
return joints
def __call__(self, img_input):
t0 = time.time()
img_batch, scaler, [offset_x, offset_y] = self.gen_input_batch(img_input, self.box_size, self.scales)
hm, xm, ym, zm = self.sess.run([self.heatmap,
self.x_heatmap,
self.y_heatmap,
self.z_heatmap],
{self.input_crops: img_batch})
# averaging the outputs with different scales
hm_size = self.box_size // self.hm_factor
hm_avg = np.zeros((hm_size, hm_size, self.joints_sum))
xm_avg = np.zeros((hm_size, hm_size, self.joints_sum))
ym_avg = np.zeros((hm_size, hm_size, self.joints_sum))
zm_avg = np.zeros((hm_size, hm_size, self.joints_sum))
for i in range(len(self.scales)):
rescale = 1.0 / self.scales[i]
scaled_hm = utils.img_scale(hm[i, :, :, :], rescale)
scaled_x_hm = utils.img_scale(xm[i, :, :, :], rescale)
scaled_y_hm = utils.img_scale(ym[i, :, :, :], rescale)
scaled_z_hm = utils.img_scale(zm[i, :, :, :], rescale)
mid = [scaled_hm.shape[0] // 2, scaled_hm.shape[1] // 2]
hm_avg += scaled_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
xm_avg += scaled_x_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
ym_avg += scaled_y_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
zm_avg += scaled_z_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
hm_avg /= len(self.scales)
xm_avg /= len(self.scales)
ym_avg /= len(self.scales)
zm_avg /= len(self.scales)
# joints_2d are in box size scale
joints_2d = utils.extract_2d_joints(hm_avg, self.box_size, self.hm_factor)
joints_2d = self.joint_filter(joints_2d, dim=2)
joints_3d = utils.extract_3d_joints(joints_2d, xm_avg, ym_avg, zm_avg, self.hm_factor)
joints_3d = self.joint_filter(joints_3d, dim=3)
# rescale joints_2d to input image scale
joints_2d[:, 0] = (joints_2d[:, 0] - offset_y) / scaler
joints_2d[:, 1] = (joints_2d[:, 1] - offset_x) / scaler
print('FPS: {:>2.2f}'.format(1 / (time.time() - t0)))
return joints_2d, joints_3d
if __name__ == '__main__':
estimator = VNectEstimator()
j_2d, j_3d = estimator(cv2.imread('../pic/test_pic.jpg'))
print('\njoints_2d')
for i, j in enumerate(j_2d):
print(i, j)
print('\njoints_3d')
for i, j in enumerate(j_3d):
print(i, j)
|
the-stack_0_10366 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 24 17:19:18 2022
@author: justoliver
"""
import pymunk, sys
from pymunk.pygame_util import *
from pymunk.vec2d import Vec2d
import pygame
from pygame.locals import *
import numpy as np
from PIL import Image
from pymunk.pygame_util import DrawOptions
size = 800, 800
display = pygame.display.set_mode((size))
options = DrawOptions(display)
clock = pygame.time.Clock()
space = pymunk.Space()
space.gravity = 0, 981
b0 = space.static_body
b1 = space.static_body
FPS = 120
def convert_coordinates(point):
return int(point[0]), int(800-point[1])
def get_theta(x_h, x_1, y_h, y_1):
return np.arctan2(x_1 - x_h, y_1 - y_h)
def get_phi(x1, x2, y1, y2, theta):
return np.arctan2(x2 - x1, y2- y1) - theta
def get_iota(x1, x2, y1, y2, theta, phi):
return np.arctan2(x2 -x1, y2 - y1) - theta - phi
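# Descriptive note (added for clarity): each helper above uses np.arctan2(dx, dy),
# i.e. the angle of the line from the first point to the second measured from the
# positive-y ("down") direction of this scene, and get_phi / get_iota return that
# angle relative to the accumulated angle of the parent link(s).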
class measurement_body:
def __init__(self):
self.body = pymunk.Body()
self.body.position = (400,40)
self.shape = pymunk.Circle(self.body, 1)
self.shape.color = (255,0,0)
space.add(self.body, self.shape)
class Segment2:
def __init__(self, p0, a, b, radius=10, center_of_gravity = (0,0), density=0.01):
self.body = pymunk.Body()
self.body.position = p0
self.radius = radius
self.a = a
self.b = b
self.body.center_of_gravity = center_of_gravity
self.shape = pymunk.Segment(self.body, self.a, self.b, radius)
self.shape.density = density
self.shape.elasticity = 0
self.shape.filter = pymunk.ShapeFilter(group=1)
self.shape.color = (0, 255, 0, 0)
space.add(self.body, self.shape)
class Leg:
def __init__(self, p0, a, b, c, d, radius=10, center_of_gravity = (0,0), density=0.01):
self.body = pymunk.Body()
self.body.position = p0
self.radius = radius
self.a = a
self.b = b
self.c = c
self.d = d
self.body.center_of_gravity = center_of_gravity
self.leg= pymunk.Segment(self.body, self.a, self.b , radius=radius)
self.leg.filter = pymunk.ShapeFilter(group = 1)
self.leg.density = density
        self.foot = pymunk.Segment(self.body, self.c, self.d, radius=radius)
        self.foot.filter = pymunk.ShapeFilter(group=1)
        self.foot.density = density
        self.leg.elasticity = 0
        self.foot.elasticity = 0
        self.leg.color = (0, 255, 0, 0)
        space.add(self.body, self.leg, self.foot)
class Simplemotor:
def __init__(self, b, b2, rate=5, switch="off"):
self.rate = rate
self.b = b
self.b2 = b2
self.simplemotor = pymunk.SimpleMotor(self.b, self.b2, self.rate)
self.switch = switch
def drive(self, constraints, phi):
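        # Attach or detach the SimpleMotor constraint: len(constraints) tells
        # whether the motor is currently in the space, and the motor is only
        # added while the kick switch is "on" and phi is still below pi/2.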
if phi >= np.pi/2 and len(constraints) == 5:
space.remove(self.simplemotor)
elif self.switch == "off" and len(constraints) == 5:
space.remove(self.simplemotor)
elif self.switch == "on" and len(constraints) < 5 and phi < np.pi/2:
space.add(self.simplemotor)
class RotaryLimitJoint:
def __init__(self, b, b2, min, max, collide=True):
joint = pymunk.constraints.RotaryLimitJoint(b, b2, min, max)
joint.collide_bodies = collide
space.add(joint)
# class dead_hang_joint:
#     def __init__(self, b, b2, min, max, collide=True):
#         joint = pymunk.constraints.RotaryLimitJoint(b, b2, min, max)
#         joint.collide_bodies = collide
#     def dead_position(self, constraints, phi):
#         if phi == 0 and len(constraints) < 6:
class PivotJoint:
def __init__(self, b, b2, a=(0, 0), a2=(0, 0), collide=True):
        joint = pymunk.constraints.PivotJoint(b, b2, a, a2)
joint.collide_bodies = collide
space.add(joint)
class PinJoint:
def __init__(self, b, b2, a=(0, 0), a2=(0, 0)):
joint = pymunk.constraints.PinJoint(b, b2, a, a2)
space.add(joint)
class Swing_body:
def __init__(self,p0, vx1,vy1,vx2,vy2,vx3,vy3, radius=10, center_of_gravity = (0,0), density=0.05):
self.body = pymunk.Body()
self.body.position = p0
s1 = pymunk.Segment(self.body, vx1, vy1 , radius=radius)
s1.filter = pymunk.ShapeFilter(group = 1)
s1.density = density
s2 = pymunk.Segment(self.body, vx2, vy2, radius=radius)
s2.filter = pymunk.ShapeFilter(group = 1)
s2.density = density
s3 = pymunk.Segment(self.body, vx3,vy3, radius=radius)
s3.filter = pymunk.ShapeFilter(group = 1)
s3.density = density
space.add(self.body, s1,s2,s3)
def angle_reached(theta, high_score):
if len(high_score) == 0:
high_score.append(theta)
elif high_score[0] < abs(theta):
high_score[0] = abs(theta)
highest_score = high_score[0]
return high_score
# b1 = measurement_body()
hinge_point1 = (0, -100) # seg 1
hinge_point2 = (0, 100)
swing_body = (400, 625)
swing_top1 = (30, -25)
swing_top2 = (-30, -25)
swing_mid1 = (0, -25)
swing_mid2 = (0, 25)
swing_bottom1 = (-20, 25)
swing_bottom2 = (20, 25)
hinge_point3 = (0, -30) # seg 2
hinge_point4 = (0, 30)
rate = 3
segment = Segment2((400 , 500), hinge_point1 , hinge_point2)
segment2 = Segment2((420,680), hinge_point3, hinge_point4, density= 0.05)
swing = Swing_body(swing_body, swing_top1,swing_top2, swing_mid1, swing_mid2, swing_bottom1, swing_bottom2)
PinJoint(swing.body, segment2.body, swing_bottom2, hinge_point3)
PinJoint(segment.body, swing.body, hinge_point2, swing_mid1)
PinJoint(b0, segment.body, (400,400), hinge_point1)
simplemotor = Simplemotor(swing.body, segment2.body, rate)
rotlimjoint = RotaryLimitJoint(swing.body, segment2.body, -np.pi/2, np.pi/4)
def game():
pygame.display.set_caption("Double pendulum interactive Simulation")
high_score = []
while True:
xh, yh = (400,400)
x1, y1 = segment.body.position[0], segment.body.position[1]
theta = get_theta(xh, x1, yh, y1)
x2, y2 = segment.body.position[0] + 100*np.sin(theta) , segment.body.position[1] + 100*np.cos(theta)
x3, y3 = swing.body.position[0], swing.body.position[1]
phi = get_phi(x2, x3, y2, y3, theta)
x4, y4 = swing.body.position[0] + 25*np.sin(theta+phi) + 20*np.cos(theta+phi), swing.body.position[1] + 25*np.cos(theta+phi) - 20*np.sin(theta+phi)
x5, y5 = segment2.body.position[0], segment2.body.position[1]
iota = get_iota(x4, x5, y4, y5, theta, phi)
print(f"iota={iota}")
angvel1 = swing.body.angular_velocity
angvel2 = -segment2.body.angular_velocity
# print(f"seg1:{angvel1}\nseg2:{angvel2}")
# print(segment2.body.angular_velocity)
# abs_vel = np.sqrt(segment.body.velocity[0]**2 + segment.body.velocity[1]**2)
# if segment.body.velocity[0]< 1:
# rad_vel = -abs_vel/150
# else:
# rad_vel = abs_vel/150
# print(rad_vel)
for event in pygame.event.get(): # checking for user input
if event.type == pygame.QUIT:
print(f"Highest angle reached was:{np.rad2deg(high_score)}")
pygame.quit()
sys.exit()
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]: # kick input
simplemotor.switch = "on"
if iota >= np.pi/2:
if len(space.constraints) == 5:
space.remove(simplemotor.simplemotor)
segment2.body.angular_velocity = angvel1
else:
simplemotor.drive(space.constraints, phi)
else:
simplemotor.switch = "off"
if iota <= 0:
segment2.body.angular_velocity = angvel1
else:
simplemotor.drive(space.constraints, phi)
high_score = angle_reached(theta, high_score)
display.fill((255, 255, 255))
space.debug_draw(options)
pygame.display.update()
clock.tick(FPS) # limiting frames per second to 120
space.step(1/FPS)
game()
pygame.quit() |
the-stack_0_10369 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
# You can find misc modules, which dont fit in anything xD
""" Userbot module for other small commands. """
from random import randint
from asyncio import sleep
from os import execl
import sys
import os
import io
import sys
import json
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP, bot
from userbot.events import register
@register(outgoing=True, pattern="^.random")
async def randomise(items):
""" For .random command, get a random item from the list of items. """
itemo = (items.text[8:]).split()
if len(itemo) < 2:
await items.edit(
"`2 or more items are required! Check .help random for more info.`"
)
return
    index = randint(0, len(itemo) - 1)
await items.edit("**Query: **\n`" + items.text[8:] + "`\n**Output: **\n`" +
itemo[index] + "`")
@register(outgoing=True, pattern="^.sleep( [0-9]+)?$")
async def sleepybot(time):
""" For .sleep command, let the userbot snooze for a few second. """
message = time.text
    if not time.pattern_match.group(1):
await time.reply("Syntax: `.sleep [seconds]`")
else:
counter = int(time.pattern_match.group(1))
await time.edit("`I am sulking and snoozing....`")
await sleep(2)
if BOTLOG:
await time.client.send_message(
BOTLOG_CHATID,
"You put the bot to sleep for " + str(counter) + " seconds",
)
await sleep(counter)
await time.edit("`OK, I'm awake now.`")
@register(outgoing=True, pattern="^.shutdown$")
async def killdabot(event):
""" For .shutdown command, shut the bot down."""
await event.edit("`Goodbye *Windows XP shutdown sound*....`")
if BOTLOG:
await event.client.send_message(BOTLOG_CHATID, "#SHUTDOWN \n"
"Bot shut down")
await bot.disconnect()
@register(outgoing=True, pattern="^.restart$")
async def killdabot(event):
await event.edit("`*i would be back in a moment*`")
if BOTLOG:
await event.client.send_message(BOTLOG_CHATID, "#RESTART \n"
"Bot Restarted")
await bot.disconnect()
# Spin a new instance of bot
execl(sys.executable, sys.executable, *sys.argv)
# Shut the existing one down
exit()
@register(outgoing=True, pattern="^.community$")
async def bot_community(community):
""" For .community command, just returns OG Paperplane's group link. """
    await community.edit(
        "Join RaphielGang's awesome userbot community: @userbot_support"
        "\nDo note that Paperplane Extended is an unofficial fork of their "
        "Paperplane project and it may get limited or no support for bugs.")
@register(outgoing=True, pattern="^.support$")
async def bot_support(wannahelp):
""" For .support command, just returns the group link. """
await wannahelp.edit(
"Join the OpenUserBot Channel: @PaperPlaneExtended_news \
\nJoin the OpenUserBot Chat: @PPE_Support")
@register(outgoing=True, pattern="^.creator$")
async def creator(e):
await e.edit("[TeKnoways](https://t.me/Three_Cube_TeKnoways)")
@register(outgoing=True, pattern="^.readme$")
async def reedme(e):
await e.edit(
"Here's something for you to read:\n"
"\n[OpenUserBot's README.md file](https://github.com/mkaraniya/OpenUserBot/blob/sql-extended/README.md)"
"\n[Setup Guide - Basic](https://telegra.ph/How-to-host-a-Telegram-Userbot-11-02)"
"\n[Setup Guide - Google Drive](https://telegra.ph/How-To-Setup-GDrive-11-02)"
"\n[Setup Guide - LastFM Module](https://telegra.ph/How-to-set-up-LastFM-module-for-Paperplane-userbot-11-02)"
"\n[Video Tutorial - 576p](https://mega.nz/#!ErwCESbJ!1ZvYAKdTEfb6y1FnqqiLhHH9vZg4UB2QZNYL9fbQ9vs)"
"\n[Video Tutorial - 1080p](https://mega.nz/#!x3JVhYwR!u7Uj0nvD8_CyyARrdKrFqlZEBFTnSVEiqts36HBMr-o)"
"\n[Special - Note](https://telegra.ph/Special-Note-11-02)")
# Copyright (c) Gegham Zakaryan | 2019
@register(outgoing=True, pattern="^.repeat (.*)")
async def repeat(rep):
cnt, txt = rep.pattern_match.group(1).split(' ', 1)
replyCount = int(cnt)
toBeRepeated = txt
replyText = toBeRepeated + "\n"
for i in range(0, replyCount - 1):
replyText += toBeRepeated + "\n"
await rep.edit(replyText)
@register(outgoing=True, pattern="^.repo$")
async def repo_is_here(wannasee):
""" For .repo command, just returns the repo URL. """
await wannasee.edit(
"Click [here](https://github.com/ayixx619/ppek.git) to open my kang userbot page."
)
@register(outgoing=True, pattern="^.raw$")
async def raw(event):
the_real_message = None
reply_to_id = None
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
the_real_message = previous_message.stringify()
reply_to_id = event.reply_to_msg_id
else:
the_real_message = event.stringify()
reply_to_id = event.message.id
with io.BytesIO(str.encode(the_real_message)) as out_file:
out_file.name = "raw_message_data.txt"
await event.edit(
"`Check the userbot log for the decoded message data !!`")
await event.client.send_file(
BOTLOG_CHATID,
out_file,
force_document=True,
allow_cache=False,
reply_to=reply_to_id,
caption="`Here's the decoded message data !!`")
CMD_HELP.update({
'random':
'.random <item1> <item2> ... <itemN>\
\nUsage: Get a random item from the list of items.'
})
CMD_HELP.update({
'sleep':
'.sleep <seconds>\
\nUsage: Userbots get tired too. Let yours snooze for a few seconds.'
})
CMD_HELP.update({
"shutdown":
".shutdown\
\nUsage: Sometimes you need to shut down your bot. Sometimes you just hope to\
hear Windows XP shutdown sound... but you don't."
})
CMD_HELP.update(
{'support': ".support\
\nUsage: If you need help, use this command."})
CMD_HELP.update({
'community':
".community\
\nUsage: Join the awesome Paperplane userbot community !!"
})
CMD_HELP.update({
'repo':
'.repo\
\nUsage: If you are curious what makes the userbot work, this is what you need.'
})
CMD_HELP.update({
"readme":
".readme\
\nUsage: Provide links to setup the userbot and it's modules."
})
CMD_HELP.update(
{"creator": ".creator\
\nUsage: Know who created this awesome userbot !!"})
CMD_HELP.update({
"repeat":
".repeat <no.> <text>\
\nUsage: Repeats the text for a number of times. Don't confuse this with spam tho."
})
CMD_HELP.update({"restart": ".restart\
\nUsage: Restarts the bot !!"})
CMD_HELP.update({
"raw":
".raw\
\nUsage: Get detailed JSON-like formatted data about replied message."
})
|
the-stack_0_10370 | # coding: utf-8
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the collections editor."""
from core.controllers import base
from core.domain import collection_services
from core.domain import config_domain
from core.domain import rights_manager
from core.domain import summary_services
from core.platform import models
import feconf
import utils
current_user_services = models.Registry.import_current_user_services()
def _require_valid_version(version_from_payload, collection_version):
"""Check that the payload version matches the given collection version."""
if version_from_payload is None:
raise base.BaseHandler.InvalidInputException(
'Invalid POST request: a version must be specified.')
if version_from_payload != collection_version:
raise base.BaseHandler.InvalidInputException(
'Trying to update version %s of collection from version %s, '
'which is too old. Please reload the page and try again.'
% (collection_version, version_from_payload))
def require_editor(handler):
"""Decorator that checks if the user can edit the given collection."""
def test_collection_editor(self, collection_id, **kwargs):
"""Gets the user and collection id if the user can edit it.
Args:
self: the handler instance
collection_id: the collection id
**kwargs: any other arguments passed to the handler
Returns:
The relevant handler, if the user is authorized to edit this
collection.
Raises:
self.PageNotFoundException: if no such collection exists.
self.UnauthorizedUserException: if the user exists but does not
have the right credentials.
"""
if not self.user_id:
self.redirect(current_user_services.create_login_url(
self.request.uri))
return
if (self.username in config_domain.BANNED_USERNAMES.value
or self.username not in
config_domain.WHITELISTED_COLLECTION_EDITOR_USERNAMES.value):
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.')
try:
collection_services.get_collection_by_id(collection_id)
except:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, collection_id):
raise self.UnauthorizedUserException(
'You do not have the credentials to edit this collection.',
self.user_id)
return handler(self, collection_id, **kwargs)
return test_collection_editor
class CollectionEditorHandler(base.BaseHandler):
"""Base class for all handlers for the collection editor page."""
pass
class CollectionEditorPage(CollectionEditorHandler):
"""The editor page for a single collection."""
@require_editor
def get(self, collection_id):
"""Handles GET requests."""
collection = collection_services.get_collection_by_id(
collection_id, strict=False)
self.values.update({
'can_edit': True,
'can_unpublish': rights_manager.Actor(
self.user_id).can_unpublish(
feconf.ACTIVITY_TYPE_COLLECTION, collection_id),
'collection_id': collection.id,
'is_private': rights_manager.is_collection_private(collection_id),
'nav_mode': feconf.NAV_MODE_CREATE,
'title': collection.title,
'SHOW_COLLECTION_NAVIGATION_TAB_HISTORY': (
feconf.SHOW_COLLECTION_NAVIGATION_TAB_HISTORY),
'SHOW_COLLECTION_NAVIGATION_TAB_STATS': (
feconf.SHOW_COLLECTION_NAVIGATION_TAB_STATS),
'TAG_REGEX': feconf.TAG_REGEX,
})
self.render_template('pages/collection_editor/collection_editor.html')
class EditableCollectionDataHandler(CollectionEditorHandler):
"""A data handler for collections which supports writing."""
def _require_valid_version(self, version_from_payload, collection_version):
"""Check that the payload version matches the given collection version.
"""
if version_from_payload is None:
raise base.BaseHandler.InvalidInputException(
'Invalid POST request: a version must be specified.')
if version_from_payload != collection_version:
raise base.BaseHandler.InvalidInputException(
'Trying to update version %s of collection from version %s, '
'which is too old. Please reload the page and try again.'
% (collection_version, version_from_payload))
@require_editor
def get(self, collection_id):
"""Populates the data on the individual collection page."""
try:
# Try to retrieve collection
collection_dict = (
summary_services.get_learner_collection_dict_by_id(
collection_id, self.user_id,
allow_invalid_explorations=True))
except Exception as e:
raise self.PageNotFoundException(e)
self.values.update({
'collection': collection_dict
})
self.render_json(self.values)
@require_editor
def put(self, collection_id):
"""Updates properties of the given collection."""
collection = collection_services.get_collection_by_id(collection_id)
version = self.payload.get('version')
self._require_valid_version(version, collection.version)
commit_message = self.payload.get('commit_message')
change_list = self.payload.get('change_list')
try:
collection_services.update_collection(
self.user_id, collection_id, change_list, commit_message)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
collection_dict = (
summary_services.get_learner_collection_dict_by_id(
collection_id, self.user_id, allow_invalid_explorations=True))
# Send the updated collection back to the frontend.
self.values.update({
'collection': collection_dict
})
self.render_json(self.values)
class CollectionRightsHandler(CollectionEditorHandler):
"""Handles management of collection editing rights."""
@require_editor
def put(self, collection_id):
"""Updates the editing rights for the given collection."""
collection = collection_services.get_collection_by_id(collection_id)
version = self.payload.get('version')
_require_valid_version(version, collection.version)
# TODO(bhenning): Implement other rights changes here.
is_public = self.payload.get('is_public')
if is_public is not None:
if is_public:
try:
collection.validate(strict=True)
collection_services.validate_exps_in_collection_are_public(
collection)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
collection_services.publish_collection_and_update_user_profiles(
self.user_id, collection_id)
collection_services.index_collections_given_ids([
collection_id])
elif rights_manager.Actor(self.user_id).can_unpublish(
feconf.ACTIVITY_TYPE_COLLECTION, collection_id):
rights_manager.unpublish_collection(self.user_id, collection_id)
collection_services.delete_documents_from_search_index([
collection_id])
else:
raise self.InvalidInputException(
'Cannot unpublish a collection.')
self.render_json({
'rights': rights_manager.get_collection_rights(
collection_id).to_dict()
})
class ExplorationMetadataSearchHandler(base.BaseHandler):
"""Provides data for exploration search."""
def get(self):
"""Handles GET requests."""
query_string = self.request.get('q')
search_cursor = self.request.get('cursor', None)
collection_node_metadata_list, new_search_cursor = (
summary_services.get_exp_metadata_dicts_matching_query(
query_string, search_cursor, self.user_id))
self.values.update({
'collection_node_metadata_list': collection_node_metadata_list,
'search_cursor': new_search_cursor,
})
self.render_json(self.values)
|
the-stack_0_10372 | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.agent import agent
from lib.core.common import arrayizeValue
from lib.core.common import Backend
from lib.core.common import filterPairValues
from lib.core.common import getLimitRange
from lib.core.common import isInferenceAvailable
from lib.core.common import isNoneValue
from lib.core.common import isNumPosStrValue
from lib.core.common import isTechniqueAvailable
from lib.core.common import readInput
from lib.core.common import safeSQLIdentificatorNaming
from lib.core.common import safeStringFormat
from lib.core.common import unArrayizeValue
from lib.core.common import unsafeSQLIdentificatorNaming
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.data import queries
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import DBMS
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import CURRENT_DB
from lib.core.settings import METADB_SUFFIX
from lib.request import inject
from lib.techniques.brute.use import columnExists
from lib.techniques.brute.use import tableExists
class Search:
"""
This class defines search functionalities for plugins.
"""
def __init__(self):
pass
def searchDb(self):
foundDbs = []
rootQuery = queries[Backend.getIdentifiedDbms()].search_db
dbList = conf.db.split(",")
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
dbCond = rootQuery.inband.condition2
else:
dbCond = rootQuery.inband.condition
dbConsider, dbCondParam = self.likeOrExact("database")
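        # likeOrExact() prompts whether to treat the provided name(s) as exact
        # identifiers or as LIKE patterns and returns that choice together with
        # the SQL condition fragment used when building the queries below.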
for db in dbList:
values = []
db = safeSQLIdentificatorNaming(db)
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
db = db.upper()
infoMsg = "searching database"
if dbConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(db)
logger.info(infoMsg)
if conf.excludeSysDbs:
exclDbsQuery = "".join(" AND '%s' != %s" % (unsafeSQLIdentificatorNaming(db), dbCond) for db in self.excludeDbsList)
infoMsg = "skipping system database%s '%s'" % ("s" if len(self.excludeDbsList) > 1 else "", ", ".join(db for db in self.excludeDbsList))
logger.info(infoMsg)
else:
exclDbsQuery = ""
dbQuery = "%s%s" % (dbCond, dbCondParam)
dbQuery = dbQuery % unsafeSQLIdentificatorNaming(db)
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
query = rootQuery.inband.query2
else:
query = rootQuery.inband.query
query = query % (dbQuery + exclDbsQuery)
values = inject.getValue(query, blind=False, time=False)
if not isNoneValue(values):
values = arrayizeValue(values)
for value in values:
value = safeSQLIdentificatorNaming(value)
foundDbs.append(value)
if not values and isInferenceAvailable() and not conf.direct:
infoMsg = "fetching number of database"
if dbConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(db)
logger.info(infoMsg)
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
query = rootQuery.blind.count2
else:
query = rootQuery.blind.count
query = query % (dbQuery + exclDbsQuery)
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no database"
if dbConsider == "1":
warnMsg += "s like"
warnMsg += " '%s' found" % unsafeSQLIdentificatorNaming(db)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
query = rootQuery.blind.query2
else:
query = rootQuery.blind.query
query = query % (dbQuery + exclDbsQuery)
query = agent.limitQuery(index, query, dbCond)
value = unArrayizeValue(inject.getValue(query, union=False, error=False))
value = safeSQLIdentificatorNaming(value)
foundDbs.append(value)
conf.dumper.lister("found databases", foundDbs)
def searchTable(self):
bruteForce = False
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
errMsg = "information_schema not available, "
errMsg += "back-end DBMS is MySQL < 5.0"
bruteForce = True
if bruteForce:
message = "do you want to use common table existence check? %s" % ("[Y/n/q]" if Backend.getIdentifiedDbms() in (DBMS.ACCESS,) else "[y/N/q]")
test = readInput(message, default="Y" if "Y" in message else "N")
if test[0] in ("n", "N"):
return
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
else:
regex = "|".join(conf.tbl.split(","))
return tableExists(paths.COMMON_TABLES, regex)
foundTbls = {}
tblList = conf.tbl.split(",")
rootQuery = queries[Backend.getIdentifiedDbms()].search_table
tblCond = rootQuery.inband.condition
dbCond = rootQuery.inband.condition2
tblConsider, tblCondParam = self.likeOrExact("table")
for tbl in tblList:
values = []
tbl = safeSQLIdentificatorNaming(tbl, True)
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2, DBMS.FIREBIRD):
tbl = tbl.upper()
infoMsg = "searching table"
if tblConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(tbl)
if dbCond and conf.db and conf.db != CURRENT_DB:
_ = conf.db.split(",")
whereDbsQuery = " AND (" + " OR ".join("%s = '%s'" % (dbCond, unsafeSQLIdentificatorNaming(db)) for db in _) + ")"
infoMsg += " for database%s '%s'" % ("s" if len(_) > 1 else "", ", ".join(db for db in _))
elif conf.excludeSysDbs:
whereDbsQuery = "".join(" AND '%s' != %s" % (unsafeSQLIdentificatorNaming(db), dbCond) for db in self.excludeDbsList)
infoMsg2 = "skipping system database%s '%s'" % ("s" if len(self.excludeDbsList) > 1 else "", ", ".join(db for db in self.excludeDbsList))
logger.info(infoMsg2)
else:
whereDbsQuery = ""
logger.info(infoMsg)
tblQuery = "%s%s" % (tblCond, tblCondParam)
tblQuery = tblQuery % unsafeSQLIdentificatorNaming(tbl)
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
query = rootQuery.inband.query
query = query % (tblQuery + whereDbsQuery)
values = inject.getValue(query, blind=False, time=False)
if values and Backend.getIdentifiedDbms() in (DBMS.SQLITE, DBMS.FIREBIRD):
newValues = []
if isinstance(values, basestring):
values = [values]
for value in values:
dbName = "SQLite" if Backend.isDbms(DBMS.SQLITE) else "Firebird"
newValues.append(["%s%s" % (dbName, METADB_SUFFIX), value])
values = newValues
for foundDb, foundTbl in filterPairValues(values):
foundDb = safeSQLIdentificatorNaming(foundDb)
foundTbl = safeSQLIdentificatorNaming(foundTbl, True)
if foundDb is None or foundTbl is None:
continue
if foundDb in foundTbls:
foundTbls[foundDb].append(foundTbl)
else:
foundTbls[foundDb] = [foundTbl]
if not values and isInferenceAvailable() and not conf.direct:
if Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.FIREBIRD):
if len(whereDbsQuery) == 0:
infoMsg = "fetching number of databases with table"
if tblConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(tbl)
logger.info(infoMsg)
query = rootQuery.blind.count
query = query % (tblQuery + whereDbsQuery)
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no databases have table"
if tblConsider == "1":
warnMsg += "s like"
warnMsg += " '%s'" % unsafeSQLIdentificatorNaming(tbl)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query
query = query % (tblQuery + whereDbsQuery)
query = agent.limitQuery(index, query)
foundDb = unArrayizeValue(inject.getValue(query, union=False, error=False))
foundDb = safeSQLIdentificatorNaming(foundDb)
if foundDb not in foundTbls:
foundTbls[foundDb] = []
if tblConsider == "2":
foundTbls[foundDb].append(tbl)
if tblConsider == "2":
continue
else:
for db in conf.db.split(",") if conf.db else (self.getCurrentDb(),):
db = safeSQLIdentificatorNaming(db)
if db not in foundTbls:
foundTbls[db] = []
else:
dbName = "SQLite" if Backend.isDbms(DBMS.SQLITE) else "Firebird"
foundTbls["%s%s" % (dbName, METADB_SUFFIX)] = []
for db in foundTbls.keys():
db = safeSQLIdentificatorNaming(db)
infoMsg = "fetching number of table"
if tblConsider == "1":
infoMsg += "s like"
infoMsg += " '%s' in database '%s'" % (unsafeSQLIdentificatorNaming(tbl), unsafeSQLIdentificatorNaming(db))
logger.info(infoMsg)
query = rootQuery.blind.count2
if Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.FIREBIRD):
query = query % unsafeSQLIdentificatorNaming(db)
query += " AND %s" % tblQuery
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no table"
if tblConsider == "1":
warnMsg += "s like"
warnMsg += " '%s' " % unsafeSQLIdentificatorNaming(tbl)
warnMsg += "in database '%s'" % unsafeSQLIdentificatorNaming(db)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query2
if query.endswith("'%s')"):
query = query[:-1] + " AND %s)" % tblQuery
else:
query += " AND %s" % tblQuery
if Backend.isDbms(DBMS.FIREBIRD):
query = safeStringFormat(query, index)
if Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.FIREBIRD):
query = safeStringFormat(query, unsafeSQLIdentificatorNaming(db))
if not Backend.isDbms(DBMS.FIREBIRD):
query = agent.limitQuery(index, query)
foundTbl = unArrayizeValue(inject.getValue(query, union=False, error=False))
if not isNoneValue(foundTbl):
kb.hintValue = foundTbl
foundTbl = safeSQLIdentificatorNaming(foundTbl, True)
foundTbls[db].append(foundTbl)
for db in foundTbls.keys():
if isNoneValue(foundTbls[db]):
del foundTbls[db]
if not foundTbls:
warnMsg = "no databases contain any of the provided tables"
logger.warn(warnMsg)
return
conf.dumper.dbTables(foundTbls)
self.dumpFoundTables(foundTbls)
def searchColumn(self):
bruteForce = False
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
errMsg = "information_schema not available, "
errMsg += "back-end DBMS is MySQL < 5.0"
bruteForce = True
if bruteForce:
message = "do you want to use common column existence check? %s" % ("[Y/n/q]" if Backend.getIdentifiedDbms() in (DBMS.ACCESS,) else "[y/N/q]")
test = readInput(message, default="Y" if "Y" in message else "N")
if test[0] in ("n", "N"):
return
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
else:
regex = '|'.join(conf.col.split(','))
conf.dumper.dbTableColumns(columnExists(paths.COMMON_COLUMNS, regex))
message = "do you want to dump entries? [Y/n] "
output = readInput(message, default="Y")
if output and output[0] not in ("n", "N"):
self.dumpAll()
return
rootQuery = queries[Backend.getIdentifiedDbms()].search_column
foundCols = {}
dbs = {}
whereDbsQuery = ""
whereTblsQuery = ""
infoMsgTbl = ""
infoMsgDb = ""
colList = conf.col.split(",")
if conf.excludeCol:
colList = [_ for _ in colList if _ not in conf.excludeCol.split(',')]
origTbl = conf.tbl
origDb = conf.db
colCond = rootQuery.inband.condition
dbCond = rootQuery.inband.condition2
tblCond = rootQuery.inband.condition3
colConsider, colCondParam = self.likeOrExact("column")
for column in colList:
values = []
column = safeSQLIdentificatorNaming(column)
conf.db = origDb
conf.tbl = origTbl
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
column = column.upper()
infoMsg = "searching column"
if colConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(column)
foundCols[column] = {}
if conf.tbl:
_ = conf.tbl.split(",")
whereTblsQuery = " AND (" + " OR ".join("%s = '%s'" % (tblCond, unsafeSQLIdentificatorNaming(tbl)) for tbl in _) + ")"
infoMsgTbl = " for table%s '%s'" % ("s" if len(_) > 1 else "", ", ".join(unsafeSQLIdentificatorNaming(tbl) for tbl in _))
if conf.db and conf.db != CURRENT_DB:
_ = conf.db.split(",")
whereDbsQuery = " AND (" + " OR ".join("%s = '%s'" % (dbCond, unsafeSQLIdentificatorNaming(db)) for db in _) + ")"
infoMsgDb = " in database%s '%s'" % ("s" if len(_) > 1 else "", ", ".join(unsafeSQLIdentificatorNaming(db) for db in _))
elif conf.excludeSysDbs:
whereDbsQuery = "".join(" AND %s != '%s'" % (dbCond, unsafeSQLIdentificatorNaming(db)) for db in self.excludeDbsList)
infoMsg2 = "skipping system database%s '%s'" % ("s" if len(self.excludeDbsList) > 1 else "", ", ".join(unsafeSQLIdentificatorNaming(db) for db in self.excludeDbsList))
logger.info(infoMsg2)
else:
infoMsgDb = " across all databases"
logger.info("%s%s%s" % (infoMsg, infoMsgTbl, infoMsgDb))
colQuery = "%s%s" % (colCond, colCondParam)
colQuery = colQuery % unsafeSQLIdentificatorNaming(column)
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
if not all((conf.db, conf.tbl)):
# Enumerate tables containing the column provided if
# either of database(s) or table(s) is not provided
query = rootQuery.inband.query
query = query % (colQuery + whereDbsQuery + whereTblsQuery)
values = inject.getValue(query, blind=False, time=False)
else:
# Assume provided databases' tables contain the
# column(s) provided
values = []
for db in conf.db.split(","):
for tbl in conf.tbl.split(","):
values.append([safeSQLIdentificatorNaming(db), safeSQLIdentificatorNaming(tbl, True)])
for db, tbl in filterPairValues(values):
db = safeSQLIdentificatorNaming(db)
tbls = tbl.split(",") if not isNoneValue(tbl) else []
for tbl in tbls:
tbl = safeSQLIdentificatorNaming(tbl, True)
if db is None or tbl is None:
continue
conf.db = db
conf.tbl = tbl
conf.col = column
self.getColumns(onlyColNames=True, colTuple=(colConsider, colCondParam), bruteForce=False)
if db in kb.data.cachedColumns and tbl in kb.data.cachedColumns[db]:
if db not in dbs:
dbs[db] = {}
if tbl not in dbs[db]:
dbs[db][tbl] = {}
dbs[db][tbl].update(kb.data.cachedColumns[db][tbl])
if db in foundCols[column]:
foundCols[column][db].append(tbl)
else:
foundCols[column][db] = [tbl]
kb.data.cachedColumns = {}
if not values and isInferenceAvailable() and not conf.direct:
if not conf.db:
infoMsg = "fetching number of databases with tables containing column"
if colConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(column)
logger.info("%s%s%s" % (infoMsg, infoMsgTbl, infoMsgDb))
query = rootQuery.blind.count
query = query % (colQuery + whereDbsQuery + whereTblsQuery)
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no databases have tables containing column"
if colConsider == "1":
warnMsg += "s like"
warnMsg += " '%s'" % unsafeSQLIdentificatorNaming(column)
logger.warn("%s%s" % (warnMsg, infoMsgTbl))
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query
query = query % (colQuery + whereDbsQuery + whereTblsQuery)
query = agent.limitQuery(index, query)
db = unArrayizeValue(inject.getValue(query, union=False, error=False))
db = safeSQLIdentificatorNaming(db)
if db not in dbs:
dbs[db] = {}
if db not in foundCols[column]:
foundCols[column][db] = []
else:
for db in conf.db.split(",") if conf.db else (self.getCurrentDb(),):
db = safeSQLIdentificatorNaming(db)
if db not in foundCols[column]:
foundCols[column][db] = []
origDb = conf.db
origTbl = conf.tbl
for column, dbData in foundCols.items():
colQuery = "%s%s" % (colCond, colCondParam)
colQuery = colQuery % unsafeSQLIdentificatorNaming(column)
for db in dbData:
conf.db = origDb
conf.tbl = origTbl
infoMsg = "fetching number of tables containing column"
if colConsider == "1":
infoMsg += "s like"
infoMsg += " '%s' in database '%s'" % (unsafeSQLIdentificatorNaming(column), unsafeSQLIdentificatorNaming(db))
logger.info(infoMsg)
query = rootQuery.blind.count2
query = query % unsafeSQLIdentificatorNaming(db)
query += " AND %s" % colQuery
query += whereTblsQuery
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no tables contain column"
if colConsider == "1":
warnMsg += "s like"
warnMsg += " '%s' " % unsafeSQLIdentificatorNaming(column)
warnMsg += "in database '%s'" % unsafeSQLIdentificatorNaming(db)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query2
if query.endswith("'%s')"):
query = query[:-1] + " AND %s)" % (colQuery + whereTblsQuery)
else:
query += " AND %s" % (colQuery + whereTblsQuery)
query = safeStringFormat(query, unsafeSQLIdentificatorNaming(db))
query = agent.limitQuery(index, query)
tbl = unArrayizeValue(inject.getValue(query, union=False, error=False))
kb.hintValue = tbl
tbl = safeSQLIdentificatorNaming(tbl, True)
conf.db = db
conf.tbl = tbl
conf.col = column
self.getColumns(onlyColNames=True, colTuple=(colConsider, colCondParam), bruteForce=False)
if db in kb.data.cachedColumns and tbl in kb.data.cachedColumns[db]:
if db not in dbs:
dbs[db] = {}
if tbl not in dbs[db]:
dbs[db][tbl] = {}
dbs[db][tbl].update(kb.data.cachedColumns[db][tbl])
kb.data.cachedColumns = {}
if db in foundCols[column]:
foundCols[column][db].append(tbl)
else:
foundCols[column][db] = [tbl]
if dbs:
conf.dumper.dbColumns(foundCols, colConsider, dbs)
self.dumpFoundColumn(dbs, foundCols, colConsider)
else:
warnMsg = "no databases have tables containing any of the "
warnMsg += "provided columns"
logger.warn(warnMsg)
def search(self):
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
for item in ('db', 'tbl', 'col'):
if getattr(conf, item, None):
setattr(conf, item, getattr(conf, item).upper())
if conf.col:
self.searchColumn()
elif conf.tbl:
self.searchTable()
elif conf.db:
self.searchDb()
else:
errMsg = "missing parameter, provide -D, -T or -C along "
errMsg += "with --search"
raise SqlmapMissingMandatoryOptionException(errMsg)
|
the-stack_0_10375 | import logging
logging.basicConfig(level=logging.DEBUG)
from aiomailserver.core.controller import MailServerController
import asyncio
def exception_handler(*args, **kwargs):
logging.exception(args)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.set_debug(True)
loop.set_exception_handler(exception_handler)
server = MailServerController(loop=loop)
server.loop.run_until_complete(server.start())
try:
server.loop.run_forever()
finally:
server.loop.run_until_complete(server.close())
|
the-stack_0_10377 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from functools import partial
from pathlib import Path
from typing import Optional
from pants.base.build_root import BuildRoot
from pants.fs.archive import TGZ
from pants.init.repro import Repro, Reproducer
from pants.testutil.subsystem.util import global_subsystem_instance
from pants.util.contextutil import pushd, temporary_dir
from pants.util.dirutil import safe_file_dump
class ReproTest(unittest.TestCase):
@staticmethod
def add_file(root: Path, relpath: str, *, content: str = '') -> None:
full_path = Path(root, relpath)
safe_file_dump(str(full_path), payload=content)
def assert_file(
self, root: Path, relpath: str, *, expected_content: Optional[str] = None
) -> None:
full_path = Path(root, relpath)
self.assertTrue(full_path.exists())
if expected_content is not None:
self.assertEqual(expected_content, full_path.read_text())
def assert_not_exists(self, root: Path, relpath: str) -> None:
self.assertFalse(Path(root, relpath).exists())
def test_repro(self) -> None:
"""Verify that Repro object creates expected tar.gz file"""
with temporary_dir() as tmpdir:
fake_buildroot = Path(tmpdir, 'buildroot')
add_file = partial(self.add_file, fake_buildroot)
add_file('.git/foo', content='foo')
add_file('dist/bar', content='bar')
add_file('baz.txt', content='baz')
add_file('qux/quux.txt', content='quux')
repro_file = Path(tmpdir, 'repro.tar.gz')
repro = Repro(str(repro_file), str(fake_buildroot), ignore=['.git', 'dist'])
repro.capture(run_info_dict={'foo': 'bar', 'baz': 'qux'})
extract_dir = Path(tmpdir, 'extract')
TGZ.extract(str(repro_file), str(extract_dir))
assert_file = partial(self.assert_file, extract_dir)
assert_file('baz.txt', expected_content='baz')
assert_file('qux/quux.txt', expected_content='quux')
assert_file('repro.sh')
assert_not_exists = partial(self.assert_not_exists, extract_dir)
assert_not_exists('.git')
assert_not_exists('dist')
def test_ignore_dir(self) -> None:
"""Verify that passing --repro-ignore option ignores the directory"""
        # Buildroot is based on your cwd so we need to step into a fresh
# directory for repro to look at.
root_instance = BuildRoot()
with temporary_dir() as build_root, \
root_instance.temporary(build_root), \
pushd(build_root), \
temporary_dir() as capture_dir:
add_file = partial(self.add_file, build_root)
add_file('pants.ini')
add_file('.git/foo', content='foo')
add_file('dist/bar', content='bar')
add_file('foo/bar', content='baz')
add_file('src/test1', content='test1')
add_file('src/test2', content='test1')
repro_file = Path(capture_dir, 'repro.tar.gz')
options = {
Reproducer.options_scope: dict(
capture=str(repro_file),
ignore=['src'],
)}
repro_sub = global_subsystem_instance(Reproducer, options=options)
repro = repro_sub.create_repro() # This is normally called in pants_exe.
repro.capture(run_info_dict={})
extract_loc = Path(capture_dir, 'extract')
TGZ.extract(str(repro_file), str(extract_loc))
self.assert_file(extract_loc, 'foo/bar', expected_content='baz')
assert_not_exists = partial(self.assert_not_exists, extract_loc)
assert_not_exists('.git')
assert_not_exists('src')
|
the-stack_0_10378 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Adding New Instrument
=====================
Any new instrument should be a subclass of Instrument and it must have a name.
When a new instrument is added to Workload Automation, the methods of the new
instrument will be found automatically and hooked up to the supported signals.
Once a signal is broadcasted, the corresponding registered method is invoked.
Each method in Instrument must take two arguments, which are self and context.
Supported signals can be found in [... link to signals ...]. To make
implementations easier and more consistent, the basic steps to add a new
instrument are similar to the steps to add a new workload.
Hence, implementing the following methods is sufficient to add a new instrument:
    - setup: This method is invoked after the workload is set up. All the
      necessary setup should go inside this method. Setup includes operations
      like pushing files to the target device, installing them, clearing logs,
      etc.
    - start: It is invoked just before the workload starts execution. This is
      where the instrument's measurements start being registered/taken.
    - stop: It is invoked just after the workload execution stops. The
      measurements should stop being taken/registered here.
    - update_result: It is invoked after the workload has updated its result.
      update_result is where the taken measurements are added to the result so
      they can be processed by Workload Automation.
    - teardown is invoked after the workload is torn down. It is a good place
      to clean up any logs generated by the instrument.
For example, to add an instrument which will trace device errors, we subclass
Instrument and override the name attribute. ::
#BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')
class TraceErrorsInstrument(Instrument):
name = 'trace-errors'
def __init__(self, device):
super(TraceErrorsInstrument, self).__init__(device)
self.trace_on_device = os.path.join(self.device.working_directory, 'trace')
We then declare and implement the aforementioned methods. For the setup method,
we want to push the file to the target device and then change the file mode to
755 ::
def setup(self, context):
self.device.push_file(BINARY_FILE, self.device.working_directory)
self.device.execute('chmod 755 {}'.format(self.trace_on_device))
Then we implement the start method, which will simply run the file to start
tracing. ::
def start(self, context):
self.device.execute('{} start'.format(self.trace_on_device))
Lastly, we need to stop tracing once the workload stops and this happens in the
stop method::
def stop(self, context):
self.device.execute('{} stop'.format(self.trace_on_device))
The generated result can be updated inside update_result, or, if it is a trace,
we just pull the file to the host machine. context has a result variable which
has an add_metric method. It can be used to add the instrumentation's metrics
to the final result for the workload. The method can be passed 4 parameters:
metric key, value, unit and lower_is_better, which is a boolean. ::
def update_result(self, context):
            # pull the trace file from the device to the host
result = os.path.join(self.device.working_directory, 'trace.txt')
self.device.pull_file(result, context.working_directory)
            # parse the file if it needs to be parsed, or add the result to
            # context.result
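Continuing the update_result example, values parsed from the trace can then be
attached as metrics. The lines below are only an illustration of the add_metric
parameters described above; the metric name, its value and the parse_trace
helper are made up for this example. ::
        # inside update_result, after pulling the trace file to the host:
        error_count = parse_trace(os.path.join(context.working_directory, 'trace.txt'))
        context.result.add_metric('trace_errors', error_count, 'errors', lower_is_better=True)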
At the end, we might want to delete any files generated by the instrumentation,
and the code to clean up these files goes in the teardown method. ::
def teardown(self, context):
self.device.delete_file(os.path.join(self.device.working_directory, 'trace.txt'))
"""
import logging
import inspect
from collections import OrderedDict
import wlauto.core.signal as signal
from wlauto.core.extension import Extension
from wlauto.exceptions import WAError, DeviceNotRespondingError, TimeoutError
from wlauto.utils.misc import get_traceback, isiterable
from wlauto.utils.types import identifier
logger = logging.getLogger('instrumentation')
# Maps method names onto the signals they should be registered to.
# Note: the begin/end signals are paired -- if a begin_ signal is sent,
# then the corresponding end_ signal is guaranteed to also be sent.
# Note: using OrderedDict to preserve logical ordering for the table generated
# in the documentation
SIGNAL_MAP = OrderedDict([
# Below are "aliases" for some of the more common signals to allow
# instrumentation to have similar structure to workloads
('initialize', signal.RUN_INIT),
('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('start', signal.BEFORE_WORKLOAD_EXECUTION),
('stop', signal.AFTER_WORKLOAD_EXECUTION),
('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('finalize', signal.RUN_FIN),
('on_run_start', signal.RUN_START),
('on_run_end', signal.RUN_END),
('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
('on_iteration_start', signal.ITERATION_START),
('on_iteration_end', signal.ITERATION_END),
('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
('after_initial_boot', signal.AFTER_INITIAL_BOOT),
('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
('before_boot', signal.BEFORE_BOOT),
('on_successful_boot', signal.SUCCESSFUL_BOOT),
('after_boot', signal.AFTER_BOOT),
('on_spec_init', signal.SPEC_INIT),
('on_run_init', signal.RUN_INIT),
('on_iteration_init', signal.ITERATION_INIT),
('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),
('on_error', signal.ERROR_LOGGED),
('on_warning', signal.WARNING_LOGGED),
])
PRIORITY_MAP = OrderedDict([
('very_fast_', 20),
('fast_', 10),
('normal_', 0),
('slow_', -10),
('very_slow_', -20),
])
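# For example (based on the prefix handling in install() below): a method named
# ``fast_setup`` on an instrument will be connected to the ``setup`` signal with
# priority 10, while ``very_slow_teardown`` will be connected to the
# ``teardown`` signal with priority -20.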
installed = []
def is_installed(instrument):
if isinstance(instrument, Instrument):
if instrument in installed:
return True
if instrument.name in [i.name for i in installed]:
return True
elif isinstance(instrument, type):
if instrument in [i.__class__ for i in installed]:
return True
else: # assume string
if identifier(instrument) in [identifier(i.name) for i in installed]:
return True
return False
def is_enabled(instrument):
if isinstance(instrument, Instrument) or isinstance(instrument, type):
name = instrument.name
else: # assume string
name = instrument
try:
installed_instrument = get_instrument(name)
return installed_instrument.is_enabled
except ValueError:
return False
failures_detected = False
def reset_failures():
global failures_detected # pylint: disable=W0603
failures_detected = False
def check_failures():
result = failures_detected
reset_failures()
return result
class ManagedCallback(object):
"""
    This wraps instruments' callbacks to ensure that errors do not interfere
    with run execution.
"""
def __init__(self, instrument, callback):
self.instrument = instrument
self.callback = callback
def __call__(self, context):
if self.instrument.is_enabled:
try:
self.callback(context)
except (KeyboardInterrupt, DeviceNotRespondingError, TimeoutError): # pylint: disable=W0703
raise
except Exception as e: # pylint: disable=W0703
logger.error('Error in instrument {}'.format(self.instrument.name))
global failures_detected # pylint: disable=W0603
failures_detected = True
if isinstance(e, WAError):
logger.error(e)
else:
tb = get_traceback()
logger.error(tb)
logger.error('{}({})'.format(e.__class__.__name__, e))
if not context.current_iteration:
                    # Error occurred outside of an iteration (most likely
                    # during initial setup or teardown). Since this would affect
                    # the rest of the run, mark the instrument as broken so that
                    # it doesn't get re-enabled for subsequent iterations.
self.instrument.is_broken = True
disable(self.instrument)
# Need this to keep track of callbacks, because the dispatcher only keeps
# weak references, so if the callbacks aren't referenced elsewhere, they will
# be deallocated before they've had a chance to be invoked.
_callbacks = []
def install(instrument):
"""
This will look for methods (or any callable members) with specific names
in the instrument and hook them up to the corresponding signals.
:param instrument: Instrument instance to install.
"""
logger.debug('Installing instrument %s.', instrument)
if is_installed(instrument):
raise ValueError('Instrument {} is already installed.'.format(instrument.name))
for attr_name in dir(instrument):
priority = 0
stripped_attr_name = attr_name
for key, value in PRIORITY_MAP.iteritems():
if attr_name.startswith(key):
stripped_attr_name = attr_name[len(key):]
priority = value
break
if stripped_attr_name in SIGNAL_MAP:
attr = getattr(instrument, attr_name)
if not callable(attr):
raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
argspec = inspect.getargspec(attr)
arg_num = len(argspec.args)
# Instrument callbacks will be passed exactly two arguments: self
# (the instrument instance to which the callback is bound) and
# context. However, we also allow callbacks to capture the context
# in variable arguments (declared as "*args" in the definition).
if arg_num > 2 or (arg_num < 2 and argspec.varargs is None):
message = '{} must take exactly 2 positional arguments; {} given.'
raise ValueError(message.format(attr_name, arg_num))
logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
mc = ManagedCallback(instrument, attr)
_callbacks.append(mc)
signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
installed.append(instrument)
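# Rough usage sketch (illustrative only -- in normal runs the WA execution
# framework drives this; TraceErrorsInstrument is the hypothetical instrument
# from the module docstring above):
#
#     instrument = TraceErrorsInstrument(device)
#     install(instrument)    # connect its callbacks to the signals in SIGNAL_MAP
#     validate()             # let installed instruments validate themselves
#     ...                    # run workloads; dispatched signals invoke callbacks
#     uninstall(instrument)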
def uninstall(instrument):
instrument = get_instrument(instrument)
installed.remove(instrument)
def validate():
for instrument in installed:
instrument.validate()
def get_instrument(inst):
if isinstance(inst, Instrument):
return inst
for installed_inst in installed:
if identifier(installed_inst.name) == identifier(inst):
return installed_inst
raise ValueError('Instrument {} is not installed'.format(inst))
def disable_all():
for instrument in installed:
_disable_instrument(instrument)
def enable_all():
for instrument in installed:
_enable_instrument(instrument)
def enable(to_enable):
if isiterable(to_enable):
for inst in to_enable:
_enable_instrument(inst)
else:
_enable_instrument(to_enable)
def disable(to_disable):
if isiterable(to_disable):
for inst in to_disable:
_disable_instrument(inst)
else:
_disable_instrument(to_disable)
def _enable_instrument(inst):
inst = get_instrument(inst)
if not inst.is_broken:
logger.debug('Enabling instrument {}'.format(inst.name))
inst.is_enabled = True
else:
logger.debug('Not enabling broken instrument {}'.format(inst.name))
def _disable_instrument(inst):
inst = get_instrument(inst)
if inst.is_enabled:
logger.debug('Disabling instrument {}'.format(inst.name))
inst.is_enabled = False
def get_enabled():
return [i for i in installed if i.is_enabled]
def get_disabled():
return [i for i in installed if not i.is_enabled]
class Instrument(Extension):
"""
Base class for instrumentation implementations.
"""
def __init__(self, device, **kwargs):
super(Instrument, self).__init__(**kwargs)
self.device = device
self.is_enabled = True
self.is_broken = False
def initialize(self, context):
pass
def finalize(self, context):
pass
def __str__(self):
return self.name
def __repr__(self):
return 'Instrument({})'.format(self.name)
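# Minimal subclass sketch (names are illustrative; see the module docstring for
# a fuller example). Methods named after SIGNAL_MAP keys are picked up by
# install(), and a PRIORITY_MAP prefix adjusts dispatch priority:
#
#     class ExampleInstrument(Instrument):
#
#         name = 'example'
#
#         def setup(self, context):        # SUCCESSFUL_WORKLOAD_SETUP
#             pass
#
#         def fast_start(self, context):   # BEFORE_WORKLOAD_EXECUTION, priority 10
#             pass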
|